
Update vendor directory

Nick Craig-Wood 2016-12-04 16:25:30 +00:00
parent c265f451f2
commit 34b9ac8a5d
54 changed files with 4253 additions and 283 deletions

Godeps/Godeps.json generated

@ -1,6 +1,6 @@
{ {
"ImportPath": "github.com/ncw/rclone", "ImportPath": "github.com/ncw/rclone",
"GoVersion": "go1.7", "GoVersion": "devel-41908a5",
"GodepVersion": "v75", "GodepVersion": "v75",
"Packages": [ "Packages": [
"./..." "./..."
@ -20,17 +20,17 @@
}, },
{ {
"ImportPath": "cloud.google.com/go/compute/metadata", "ImportPath": "cloud.google.com/go/compute/metadata",
"Comment": "v0.4.0-56-g5aca3b7", "Comment": "v0.5.0-11-gbfdc39b",
"Rev": "5aca3b7200b228d4b47e9e511f1d77ac270855ff" "Rev": "bfdc39bde7a3c8dd361ea7fcdb69e7a41a475bb9"
}, },
{ {
"ImportPath": "cloud.google.com/go/internal", "ImportPath": "cloud.google.com/go/internal",
"Comment": "v0.4.0-56-g5aca3b7", "Comment": "v0.5.0-11-gbfdc39b",
"Rev": "5aca3b7200b228d4b47e9e511f1d77ac270855ff" "Rev": "bfdc39bde7a3c8dd361ea7fcdb69e7a41a475bb9"
}, },
{ {
"ImportPath": "github.com/Unknwon/goconfig", "ImportPath": "github.com/Unknwon/goconfig",
"Rev": "5aa4f8cd5a472c2411c778b4680f59f2223f1966" "Rev": "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
}, },
{ {
"ImportPath": "github.com/VividCortex/ewma", "ImportPath": "github.com/VividCortex/ewma",
@ -39,138 +39,138 @@
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws", "ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/client", "ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/request", "ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/session", "ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter", "ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/service/s3", "ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface", "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager", "ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/aws/aws-sdk-go/service/sts", "ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.5.8-1-ga0a0426", "Comment": "v1.5.13-3-gc50c370",
"Rev": "a0a042689f81e61e24b16237b502bcbc4a05dfc0" "Rev": "c50c37095f7ff735e74842842f9ec157c6300273"
}, },
{ {
"ImportPath": "github.com/cpuguy83/go-md2man/md2man", "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@ -213,7 +213,7 @@
}, },
{ {
"ImportPath": "github.com/ncw/swift", "ImportPath": "github.com/ncw/swift",
"Rev": "b964f2ca856aac39885e258ad25aec08d5f64ee6" "Rev": "cefd25ce9a2fdb37e4deecc743a2c58cafecdf3f"
}, },
{ {
"ImportPath": "github.com/pkg/errors", "ImportPath": "github.com/pkg/errors",
@ -274,111 +274,111 @@
}, },
{ {
"ImportPath": "golang.org/x/crypto/nacl/secretbox", "ImportPath": "golang.org/x/crypto/nacl/secretbox",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/crypto/pbkdf2", "ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/crypto/poly1305", "ImportPath": "golang.org/x/crypto/poly1305",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/crypto/salsa20/salsa", "ImportPath": "golang.org/x/crypto/salsa20/salsa",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/crypto/scrypt", "ImportPath": "golang.org/x/crypto/scrypt",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/crypto/ssh/terminal", "ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "9477e0b78b9ac3d0b03822fd95422e2fe07627cd" "Rev": "8a549a1948fc5271eb24f36dcb0d3b47dec75a16"
}, },
{ {
"ImportPath": "golang.org/x/net/context", "ImportPath": "golang.org/x/net/context",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/context/ctxhttp", "ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/http2", "ImportPath": "golang.org/x/net/http2",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/http2/hpack", "ImportPath": "golang.org/x/net/http2/hpack",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/idna", "ImportPath": "golang.org/x/net/idna",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/internal/timeseries", "ImportPath": "golang.org/x/net/internal/timeseries",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/lex/httplex", "ImportPath": "golang.org/x/net/lex/httplex",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/net/trace", "ImportPath": "golang.org/x/net/trace",
"Rev": "4971afdc2f162e82d185353533d3cf16188a9f4e" "Rev": "6cdc0daab091c43fc46193327e0047b4f883c613"
}, },
{ {
"ImportPath": "golang.org/x/oauth2", "ImportPath": "golang.org/x/oauth2",
"Rev": "d5040cddfc0da40b408c9a1da4728662435176a9" "Rev": "f6093e37b6cb4092101a298aba5d794eb570757f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2/google", "ImportPath": "golang.org/x/oauth2/google",
"Rev": "d5040cddfc0da40b408c9a1da4728662435176a9" "Rev": "f6093e37b6cb4092101a298aba5d794eb570757f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2/internal", "ImportPath": "golang.org/x/oauth2/internal",
"Rev": "d5040cddfc0da40b408c9a1da4728662435176a9" "Rev": "f6093e37b6cb4092101a298aba5d794eb570757f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2/jws", "ImportPath": "golang.org/x/oauth2/jws",
"Rev": "d5040cddfc0da40b408c9a1da4728662435176a9" "Rev": "f6093e37b6cb4092101a298aba5d794eb570757f"
}, },
{ {
"ImportPath": "golang.org/x/oauth2/jwt", "ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "d5040cddfc0da40b408c9a1da4728662435176a9" "Rev": "f6093e37b6cb4092101a298aba5d794eb570757f"
}, },
{ {
"ImportPath": "golang.org/x/sys/unix", "ImportPath": "golang.org/x/sys/unix",
"Rev": "b699b7032584f0953262cb2788a0ca19bb494703" "Rev": "ca83bd2cb9abb47839b50eb4da612f00158f5870"
}, },
{ {
"ImportPath": "golang.org/x/text/transform", "ImportPath": "golang.org/x/text/transform",
"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68" "Rev": "5c6cf4f9a2357d38515014cea8c488ed22bdab90"
}, },
{ {
"ImportPath": "golang.org/x/text/unicode/norm", "ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "a263ba8db058568bb9beba166777d9c9dbe75d68" "Rev": "5c6cf4f9a2357d38515014cea8c488ed22bdab90"
}, },
{ {
"ImportPath": "google.golang.org/api/drive/v2", "ImportPath": "google.golang.org/api/drive/v2",
"Rev": "e4c04685e5d7db47ff294aa9e514b3a638c431c9" "Rev": "eeba0f9a5982027f920574de995cd46f96058368"
}, },
{ {
"ImportPath": "google.golang.org/api/gensupport", "ImportPath": "google.golang.org/api/gensupport",
"Rev": "e4c04685e5d7db47ff294aa9e514b3a638c431c9" "Rev": "eeba0f9a5982027f920574de995cd46f96058368"
}, },
{ {
"ImportPath": "google.golang.org/api/googleapi", "ImportPath": "google.golang.org/api/googleapi",
"Rev": "e4c04685e5d7db47ff294aa9e514b3a638c431c9" "Rev": "eeba0f9a5982027f920574de995cd46f96058368"
}, },
{ {
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "e4c04685e5d7db47ff294aa9e514b3a638c431c9" "Rev": "eeba0f9a5982027f920574de995cd46f96058368"
}, },
{ {
"ImportPath": "google.golang.org/api/storage/v1", "ImportPath": "google.golang.org/api/storage/v1",
"Rev": "e4c04685e5d7db47ff294aa9e514b3a638c431c9" "Rev": "eeba0f9a5982027f920574de995cd46f96058368"
}, },
{ {
"ImportPath": "google.golang.org/appengine", "ImportPath": "google.golang.org/appengine",
@ -414,58 +414,58 @@
}, },
{ {
"ImportPath": "google.golang.org/grpc", "ImportPath": "google.golang.org/grpc",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/codes", "ImportPath": "google.golang.org/grpc/codes",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/credentials", "ImportPath": "google.golang.org/grpc/credentials",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/grpclog", "ImportPath": "google.golang.org/grpc/grpclog",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/internal", "ImportPath": "google.golang.org/grpc/internal",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/metadata", "ImportPath": "google.golang.org/grpc/metadata",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/naming", "ImportPath": "google.golang.org/grpc/naming",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/peer", "ImportPath": "google.golang.org/grpc/peer",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/stats", "ImportPath": "google.golang.org/grpc/stats",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/tap", "ImportPath": "google.golang.org/grpc/tap",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
}, },
{ {
"ImportPath": "google.golang.org/grpc/transport", "ImportPath": "google.golang.org/grpc/transport",
"Comment": "v1.0.4-61-g63bd55d", "Comment": "v1.0.4-78-g708a7f9",
"Rev": "63bd55dfbf781b183216d2dd4433a659c947648a" "Rev": "708a7f9f3283aa2d4f6132d287d78683babe55c8"
} }
] ]
} }

vendor/bazil.org/fuse/fuse.go generated vendored

@@ -1262,7 +1262,6 @@ func (r *StatfsRequest) Respond(resp *StatfsResponse) {
 		Bfree:   resp.Bfree,
 		Bavail:  resp.Bavail,
 		Files:   resp.Files,
-		Ffree:   resp.Ffree,
 		Bsize:   resp.Bsize,
 		Namelen: resp.Namelen,
 		Frsize:  resp.Frsize,


@ -123,6 +123,11 @@ func (c *ConfigFile) DeleteKey(section, key string) bool {
section = DEFAULT_SECTION section = DEFAULT_SECTION
} }
if c.BlockMode {
c.lock.Lock()
defer c.lock.Unlock()
}
// Check if section exists. // Check if section exists.
if _, ok := c.data[section]; !ok { if _, ok := c.data[section]; !ok {
return false return false
@ -358,6 +363,11 @@ func (c *ConfigFile) GetKeyList(section string) []string {
section = DEFAULT_SECTION section = DEFAULT_SECTION
} }
if c.BlockMode {
c.lock.RLock()
defer c.lock.RUnlock()
}
// Check if section exists. // Check if section exists.
if _, ok := c.data[section]; !ok { if _, ok := c.data[section]; !ok {
return nil return nil
@ -382,6 +392,11 @@ func (c *ConfigFile) DeleteSection(section string) bool {
section = DEFAULT_SECTION section = DEFAULT_SECTION
} }
if c.BlockMode {
c.lock.Lock()
defer c.lock.Unlock()
}
// Check if section exists. // Check if section exists.
if _, ok := c.data[section]; !ok { if _, ok := c.data[section]; !ok {
return false return false
@ -412,6 +427,11 @@ func (c *ConfigFile) GetSection(section string) (map[string]string, error) {
section = DEFAULT_SECTION section = DEFAULT_SECTION
} }
if c.BlockMode {
c.lock.Lock()
defer c.lock.Unlock()
}
// Check if section exists. // Check if section exists.
if _, ok := c.data[section]; !ok { if _, ok := c.data[section]; !ok {
// Section does not exist. // Section does not exist.


@@ -71,7 +71,7 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`)
 // ValidateReqSigHandler is a request handler to ensure that the request's
 // signature doesn't expire before it is sent. This can happen when a request
-// is built and signed signficantly before it is sent. Or signficant delays
+// is built and signed signficantly before it is sent. Or significant delays
 // occur whne retrying requests that would cause the signature to expire.
 var ValidateReqSigHandler = request.NamedHandler{
 	Name: "core.ValidateReqSigHandler",


@ -55,6 +55,8 @@ type Operation struct {
HTTPMethod string HTTPMethod string
HTTPPath string HTTPPath string
*Paginator *Paginator
BeforePresignFn func(r *Request) error
} }
// Paginator keeps track of pagination configuration for an API operation. // Paginator keeps track of pagination configuration for an API operation.
@ -149,6 +151,15 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
func (r *Request) Presign(expireTime time.Duration) (string, error) { func (r *Request) Presign(expireTime time.Duration) (string, error) {
r.ExpireTime = expireTime r.ExpireTime = expireTime
r.NotHoist = false r.NotHoist = false
if r.Operation.BeforePresignFn != nil {
r = r.copy()
err := r.Operation.BeforePresignFn(r)
if err != nil {
return "", err
}
}
r.Sign() r.Sign()
if r.Error != nil { if r.Error != nil {
return "", r.Error return "", r.Error
@ -334,6 +345,17 @@ func (r *Request) Send() error {
return nil return nil
} }
// copy will copy a request which will allow for local manipulation of the
// request.
func (r *Request) copy() *Request {
req := &Request{}
*req = *r
req.Handlers = r.Handlers.Copy()
op := *r.Operation
req.Operation = &op
return req
}
// AddToUserAgent adds the string to the end of the request's current user agent. // AddToUserAgent adds the string to the end of the request's current user agent.
func AddToUserAgent(r *Request, s string) { func AddToUserAgent(r *Request, s string) {
curUA := r.HTTPRequest.Header.Get("User-Agent") curUA := r.HTTPRequest.Header.Get("User-Agent")
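Not part of the commit: a minimal sketch of presigning a request with aws-sdk-go, which is the path the new BeforePresignFn hook above feeds into (operations that set it get to adjust a copy of the request before signing). Bucket and key names are placeholders.

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// presignGet builds a GetObject request and returns a presigned URL valid
// for 15 minutes.
func presignGet() (string, error) {
	sess, err := session.NewSession()
	if err != nil {
		return "", err
	}
	svc := s3.New(sess)
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	return req.Presign(15 * time.Minute)
}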


@@ -34,7 +34,7 @@ type Session struct {
 // If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
 // method could now encounter an error when loading the configuration. When
 // The environment variable is set, and an error occurs, New will return a
-// session that will fail all requests reporting the error that occured while
+// session that will fail all requests reporting the error that occurred while
 // loading the session. Use NewSession to get the error when creating the
 // session.
 //
@@ -59,7 +59,7 @@ func New(cfgs ...*aws.Config) *Session {
 		// needs to be replicated if an error occurs while creating
 		// the session.
 		msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
-			"Use session.NewSession to handle errors occuring during session creation."
+			"Use session.NewSession to handle errors occurring during session creation."
 
 		// Session creation failed, need to report the error and prevent
 		// any requests from succeeding.
@@ -89,7 +89,7 @@ func New(cfgs ...*aws.Config) *Session {
 // to be built with retrieving credentials with AssumeRole set in the config.
 //
 // See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created. Such as specifing the
+// control through code how the Session will be created. Such as specifying the
 // config profile, and controlling if shared config is enabled or not.
 func NewSession(cfgs ...*aws.Config) (*Session, error) {
 	envCfg := loadEnvConfig()
@@ -124,7 +124,7 @@ type Options struct {
 	// Provides config values for the SDK to use when creating service clients
 	// and making API requests to services. Any value set in with this field
 	// will override the associated value provided by the SDK defaults,
-	// environment or config files where relevent.
+	// environment or config files where relevant.
 	//
 	// If not set, configuration values from from SDK defaults, environment,
 	// config will be used.


@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.5.8"
+const SDKVersion = "1.5.13"


@@ -47,12 +47,22 @@ var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
 func Build(r *request.Request) {
 	if r.ParamsFilled() {
 		v := reflect.ValueOf(r.Params).Elem()
-		buildLocationElements(r, v)
+		buildLocationElements(r, v, false)
 		buildBody(r, v)
 	}
 }
 
-func buildLocationElements(r *request.Request, v reflect.Value) {
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+	if r.ParamsFilled() {
+		v := reflect.ValueOf(r.Params).Elem()
+		buildLocationElements(r, v, true)
+		buildBody(r, v)
+	}
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
 	query := r.HTTPRequest.URL.Query()
 
 	for i := 0; i < v.NumField(); i++ {
@@ -84,6 +94,10 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
 			err = buildURI(r.HTTPRequest.URL, m, name)
 		case "querystring":
 			err = buildQueryString(query, m, name)
+		default:
+			if buildGETQuery {
+				err = buildQueryString(query, m, name)
+			}
 		}
 		r.Error = err
 	}


@@ -111,11 +111,8 @@ func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
 		elems := node.Children[name]
 
 		if elems == nil { // try to find the field in attributes
-			for _, a := range node.Attr {
-				if name == a.Name.Local {
-					// turn this into a text node for de-serializing
-					elems = []*XMLNode{{Text: a.Value}}
-				}
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
 			}
 		}


@ -2,6 +2,7 @@ package xmlutil
import ( import (
"encoding/xml" "encoding/xml"
"fmt"
"io" "io"
"sort" "sort"
) )
@ -12,6 +13,9 @@ type XMLNode struct {
Children map[string][]*XMLNode `json:",omitempty"` Children map[string][]*XMLNode `json:",omitempty"`
Text string `json:",omitempty"` Text string `json:",omitempty"`
Attr []xml.Attr `json:",omitempty"` Attr []xml.Attr `json:",omitempty"`
namespaces map[string]string
parent *XMLNode
} }
// NewXMLElement returns a pointer to a new XMLNode initialized to default values. // NewXMLElement returns a pointer to a new XMLNode initialized to default values.
@ -59,21 +63,54 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
slice = []*XMLNode{} slice = []*XMLNode{}
} }
node, e := XMLToStruct(d, &el) node, e := XMLToStruct(d, &el)
out.findNamespaces()
if e != nil { if e != nil {
return out, e return out, e
} }
node.Name = typed.Name node.Name = typed.Name
node.findNamespaces()
tempOut := *out
// Save into a temp variable, simply because out gets squashed during
// loop iterations
node.parent = &tempOut
slice = append(slice, node) slice = append(slice, node)
out.Children[name] = slice out.Children[name] = slice
case xml.EndElement: case xml.EndElement:
if s != nil && s.Name.Local == typed.Name.Local { // matching end token if s != nil && s.Name.Local == typed.Name.Local { // matching end token
return out, nil return out, nil
} }
out = &XMLNode{}
} }
} }
return out, nil return out, nil
} }
func (n *XMLNode) findNamespaces() {
ns := map[string]string{}
for _, a := range n.Attr {
if a.Name.Space == "xmlns" {
ns[a.Value] = a.Name.Local
}
}
n.namespaces = ns
}
func (n *XMLNode) findElem(name string) (string, bool) {
for node := n; node != nil; node = node.parent {
for _, a := range node.Attr {
namespace := a.Name.Space
if v, ok := node.namespaces[namespace]; ok {
namespace = v
}
if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
return a.Value, true
}
}
}
return "", false
}
// StructToXML writes an XMLNode to a xml.Encoder as tokens. // StructToXML writes an XMLNode to a xml.Encoder as tokens.
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})

File diff suppressed because it is too large.


@@ -83,7 +83,7 @@ func updateEndpointForAccelerate(r *request.Request) {
 	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
 		r.Error = awserr.New("InvalidParameterException",
-			fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
 			nil)
 		return
 	}


@ -83,14 +83,26 @@ type S3API interface {
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketAnalyticsConfigurationRequest(*s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput)
DeleteBucketAnalyticsConfiguration(*s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error)
DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput)
DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error)
DeleteBucketInventoryConfigurationRequest(*s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput)
DeleteBucketInventoryConfiguration(*s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error)
DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput)
DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error)
DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
DeleteBucketMetricsConfiguration(*s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
@ -111,6 +123,10 @@ type S3API interface {
DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error)
DeleteObjectTaggingRequest(*s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput)
DeleteObjectTagging(*s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error)
DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput)
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
@ -123,10 +139,18 @@ type S3API interface {
GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
GetBucketAnalyticsConfigurationRequest(*s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput)
GetBucketAnalyticsConfiguration(*s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error)
GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput)
GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error)
GetBucketInventoryConfigurationRequest(*s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput)
GetBucketInventoryConfiguration(*s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error)
GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput)
GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error)
@ -143,6 +167,10 @@ type S3API interface {
GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error)
GetBucketMetricsConfigurationRequest(*s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput)
GetBucketMetricsConfiguration(*s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error)
GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated)
GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error)
@ -183,6 +211,10 @@ type S3API interface {
GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error)
GetObjectTaggingRequest(*s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput)
GetObjectTagging(*s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error)
GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput)
GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error)
@ -195,6 +227,18 @@ type S3API interface {
HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
ListBucketAnalyticsConfigurationsRequest(*s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput)
ListBucketAnalyticsConfigurations(*s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error)
ListBucketInventoryConfigurationsRequest(*s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput)
ListBucketInventoryConfigurations(*s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error)
ListBucketMetricsConfigurationsRequest(*s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput)
ListBucketMetricsConfigurations(*s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error)
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error)
@ -237,10 +281,18 @@ type S3API interface {
PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
PutBucketAnalyticsConfigurationRequest(*s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput)
PutBucketAnalyticsConfiguration(*s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error)
PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput)
PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error)
PutBucketInventoryConfigurationRequest(*s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput)
PutBucketInventoryConfiguration(*s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error)
PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput)
PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error)
@ -253,6 +305,10 @@ type S3API interface {
PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error)
PutBucketMetricsConfigurationRequest(*s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput)
PutBucketMetricsConfiguration(*s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error)
PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput)
PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error)
@ -293,6 +349,10 @@ type S3API interface {
PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error)
PutObjectTaggingRequest(*s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput)
PutObjectTagging(*s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error)
RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput)
RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error)


@@ -175,6 +175,9 @@ type UploadInput struct {
 	// The type of storage to use for the object. Defaults to 'STANDARD'.
 	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
 
+	// The tag-set for the object. The tag-set must be encoded as URL Query parameters
+	Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
 	// If the bucket is configured as a website, redirects requests for this object
 	// to another object in the same bucket or to an external URL. Amazon S3 stores
 	// the value of this header in the object metadata.
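Not part of the commit: a minimal sketch of how the new Tagging field on s3manager.UploadInput could be used, assuming the usual aws-sdk-go session and uploader setup. Bucket, key, and tag values are placeholders; the tag-set is passed URL-query encoded, as the field comment above notes.

package example

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

// uploadWithTags uploads a small object and attaches a tag-set via the new
// Tagging field.
func uploadWithTags() error {
	sess, err := session.NewSession()
	if err != nil {
		return err
	}
	uploader := s3manager.NewUploader(sess)
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket:  aws.String("example-bucket"),
		Key:     aws.String("example-key"),
		Body:    strings.NewReader("hello"),
		Tagging: aws.String("env=prod&team=storage"), // URL-query encoded tag-set
	})
	return err
}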


@ -138,3 +138,4 @@ Contributors
- Cezar Sa Espinola <cezarsa@gmail.com> - Cezar Sa Espinola <cezarsa@gmail.com>
- Sam Gunaratne <samgzeit@gmail.com> - Sam Gunaratne <samgzeit@gmail.com>
- Richard Scothern <richard.scothern@gmail.com> - Richard Scothern <richard.scothern@gmail.com>
- Michel Couillard <couillard.michel@voxlog.ca>


@ -1303,10 +1303,14 @@ type ObjectOpenFile struct {
lengthOk bool // whether length is valid lengthOk bool // whether length is valid
length int64 // length of the object if read length int64 // length of the object if read
seeked bool // whether we have seeked this file or not seeked bool // whether we have seeked this file or not
overSeeked bool // set if we have seeked to the end or beyond
} }
// Read bytes from the object - see io.Reader // Read bytes from the object - see io.Reader
func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { func (file *ObjectOpenFile) Read(p []byte) (n int, err error) {
if file.overSeeked {
return 0, io.EOF
}
n, err = file.body.Read(p) n, err = file.body.Read(p)
file.bytes += int64(n) file.bytes += int64(n)
file.pos += int64(n) file.pos += int64(n)
@ -1330,6 +1334,7 @@ func (file *ObjectOpenFile) Read(p []byte) (n int, err error) {
// //
// Seek(0, 1) will return the current file pointer. // Seek(0, 1) will return the current file pointer.
func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) {
file.overSeeked = false
switch whence { switch whence {
case 0: // relative to start case 0: // relative to start
newPos = offset newPos = offset
@ -1340,6 +1345,10 @@ func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err er
return file.pos, newError(0, "Length of file unknown so can't seek from end") return file.pos, newError(0, "Length of file unknown so can't seek from end")
} }
newPos = file.length + offset newPos = file.length + offset
if offset >= 0 {
file.overSeeked = true
return
}
default: default:
panic("Unknown whence in ObjectOpenFile.Seek") panic("Unknown whence in ObjectOpenFile.Seek")
} }
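Not part of the commit: an illustrative sketch of the behaviour the overSeeked flag above introduces, assuming a connected swift.Connection and placeholder container and object names. Seeking to or past the end marks the file over-seeked, so the next Read returns io.EOF instead of requesting an invalid range.

package example

import (
	"io"

	"github.com/ncw/swift"
)

// readAfterSeekEnd seeks to the end of an object and reads; with the change
// above the Read returns io.EOF rather than an HTTP range error.
func readAfterSeekEnd(c *swift.Connection) error {
	file, _, err := c.ObjectOpen("container", "object", false, nil)
	if err != nil {
		return err
	}
	defer file.Close()

	if _, err := file.Seek(0, 2); err != nil { // whence 2: relative to end
		return err
	}

	buf := make([]byte, 16)
	_, err = file.Read(buf)
	if err == io.EOF {
		return nil // expected: nothing left to read
	}
	return err
}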


@@ -1056,6 +1056,8 @@ func diff(expected interface{}, actual interface{}) string {
 }
 
 var spewConfig = spew.ConfigState{
 	Indent:                  " ",
-	SortKeys:                true,
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+	SortKeys:                true,
 }


@ -20,6 +20,9 @@ var appengineVM bool
// Set at init time by appengine_hook.go. If nil, we're not on App Engine. // Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineAppIDFunc func(c context.Context) string
// AppEngineTokenSource returns a token source that fetches tokens // AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account. // issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine // If you are implementing a 3-legged OAuth 2.0 flow on App Engine


@ -10,4 +10,5 @@ import "google.golang.org/appengine"
func init() { func init() {
appengineTokenFunc = appengine.AccessToken appengineTokenFunc = appengine.AccessToken
appengineAppIDFunc = appengine.AppID
} }


@ -11,4 +11,5 @@ import "google.golang.org/appengine"
func init() { func init() {
appengineVM = true appengineVM = true
appengineTokenFunc = appengine.AccessToken appengineTokenFunc = appengine.AccessToken
appengineAppIDFunc = appengine.AppID
} }


@@ -18,16 +18,16 @@ import (
 	"golang.org/x/oauth2"
 )
 
-// DefaultClient returns an HTTP Client that uses the
-// DefaultTokenSource to obtain authentication credentials.
-//
-// This client should be used when developing services
-// that run on Google App Engine or Google Compute Engine
-// and use "Application Default Credentials."
-//
+// DefaultCredentials holds "Application Default Credentials".
 // For more details, see:
 // https://developers.google.com/accounts/docs/application-default-credentials
-//
+type DefaultCredentials struct {
+	ProjectID   string // may be empty
+	TokenSource oauth2.TokenSource
+}
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
 func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
 	ts, err := DefaultTokenSource(ctx, scope...)
 	if err != nil {
@@ -36,8 +36,18 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
 	return oauth2.NewClient(ctx, ts), nil
 }
 
-// DefaultTokenSource is a token source that uses
+// DefaultTokenSource returns the token source for
 // "Application Default Credentials".
+// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource.
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+	creds, err := FindDefaultCredentials(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return creds.TokenSource, nil
+}
+
+// FindDefaultCredentials searches for "Application Default Credentials".
 //
 // It looks for credentials in the following places,
 // preferring the first location found:
@@ -51,45 +61,40 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
 // 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
 // credentials from the metadata server.
 // (In this final case any provided scopes are ignored.)
-//
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-//
-func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
 	// First, try the environment variable.
 	const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
 	if filename := os.Getenv(envVar); filename != "" {
-		ts, err := tokenSourceFromFile(ctx, filename, scope)
+		creds, err := readCredentialsFile(ctx, filename, scope)
 		if err != nil {
 			return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
 		}
-		return ts, nil
+		return creds, nil
 	}
 
 	// Second, try a well-known file.
 	filename := wellKnownFile()
-	_, err := os.Stat(filename)
-	if err == nil {
-		ts, err2 := tokenSourceFromFile(ctx, filename, scope)
-		if err2 == nil {
-			return ts, nil
-		}
-		err = err2
-	} else if os.IsNotExist(err) {
-		err = nil // ignore this error
-	}
-	if err != nil {
+	if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
+		return creds, nil
+	} else if !os.IsNotExist(err) {
 		return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
 	}
 
 	// Third, if we're on Google App Engine use those credentials.
 	if appengineTokenFunc != nil && !appengineVM {
-		return AppEngineTokenSource(ctx, scope...), nil
+		return &DefaultCredentials{
+			ProjectID:   appengineAppIDFunc(ctx),
+			TokenSource: AppEngineTokenSource(ctx, scope...),
+		}, nil
 	}
 
 	// Fourth, if we're on Google Compute Engine use the metadata server.
 	if metadata.OnGCE() {
-		return ComputeTokenSource(""), nil
+		id, _ := metadata.ProjectID()
+		return &DefaultCredentials{
+			ProjectID:   id,
+			TokenSource: ComputeTokenSource(""),
+		}, nil
 	}
 
 	// None are found; return helpful error.
@@ -105,7 +110,7 @@ func wellKnownFile() string {
 	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
 }
 
-func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
 	b, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return nil, err
@@ -114,5 +119,12 @@ func tokenSourceFromFile(ctx context.Context, filename string, scopes []string)
 	if err := json.Unmarshal(b, &f); err != nil {
 		return nil, err
 	}
-	return f.tokenSource(ctx, scopes)
+	ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+	if err != nil {
+		return nil, err
+	}
+	return &DefaultCredentials{
+		ProjectID:   f.ProjectID,
+		TokenSource: ts,
+	}, nil
 }
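Not part of the commit: a minimal sketch of the new entry point added above. FindDefaultCredentials returns the token source together with the project ID when one can be determined; the scope used here is only an example.

package example

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

// findCreds resolves Application Default Credentials and reports the project
// ID that came with them (it may be empty).
func findCreds() {
	creds, err := google.FindDefaultCredentials(context.Background(),
		"https://www.googleapis.com/auth/devstorage.read_write")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("project: %q", creds.ProjectID)
	_ = creds.TokenSource // pass to oauth2.NewClient, etc.
}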


@ -112,6 +112,7 @@ type credentialsFile struct {
PrivateKeyID string `json:"private_key_id"` PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"` PrivateKey string `json:"private_key"`
TokenURL string `json:"token_uri"` TokenURL string `json:"token_uri"`
ProjectID string `json:"project_id"`
// User Credential fields // User Credential fields
// (These typically come from gcloud auth.) // (These typically come from gcloud auth.)


@ -128,6 +128,7 @@ includes_Linux='
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/icmpv6.h> #include <linux/icmpv6.h>
#include <linux/serial.h> #include <linux/serial.h>
#include <linux/can.h>
#include <net/route.h> #include <net/route.h>
#include <asm/termbits.h> #include <asm/termbits.h>
@ -339,6 +340,7 @@ ccflags="$@"
$2 !~ /^(BPF_TIMEVAL)$/ && $2 !~ /^(BPF_TIMEVAL)$/ &&
$2 ~ /^(BPF|DLT)_/ || $2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^CLOCK_/ || $2 ~ /^CLOCK_/ ||
$2 ~ /^CAN_/ ||
$2 !~ "WMESGLEN" && $2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__WCOREFLAG$/ {next}


@ -470,25 +470,11 @@ func Sysctl(name string) (string, error) {
} }
func SysctlArgs(name string, args ...int) (string, error) { func SysctlArgs(name string, args ...int) (string, error) {
mib, err := sysctlmib(name, args...) buf, err := SysctlRaw(name, args...)
if err != nil { if err != nil {
return "", err return "", err
} }
n := len(buf)
// Find size.
n := uintptr(0)
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
return "", err
}
if n == 0 {
return "", nil
}
// Read into buffer of that size.
buf := make([]byte, n)
if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
return "", err
}
// Throw away terminating NUL. // Throw away terminating NUL.
if n > 0 && buf[n-1] == '\x00' { if n > 0 && buf[n-1] == '\x00' {


@ -411,6 +411,47 @@ func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
} }
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
// The RxID and TxID fields are used for transport protocol addressing in
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
//
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
type SockaddrCAN struct {
Ifindex int
RxID uint32
TxID uint32
raw RawSockaddrCAN
}
func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
return nil, 0, EINVAL
}
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
for i := 0; i < 4; i++ {
sa.raw.Addr[i] = rx[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
for i := 0; i < 4; i++ {
sa.raw.Addr[i+4] = tx[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
}
func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) { func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
switch rsa.Addr.Family { switch rsa.Addr.Family {
case AF_NETLINK: case AF_NETLINK:


@ -58,6 +58,7 @@ package unix
#include <utime.h> #include <utime.h>
#include <bluetooth/bluetooth.h> #include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h> #include <bluetooth/hci.h>
#include <linux/can.h>
#ifdef TCSETS2 #ifdef TCSETS2
// On systems that have "struct termios2" use this as type Termios. // On systems that have "struct termios2" use this as type Termios.
@ -218,6 +219,8 @@ type RawSockaddrNetlink C.struct_sockaddr_nl
type RawSockaddrHCI C.struct_sockaddr_hci type RawSockaddrHCI C.struct_sockaddr_hci
type RawSockaddrCAN C.struct_sockaddr_can
type RawSockaddr C.struct_sockaddr type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any type RawSockaddrAny C.struct_sockaddr_any
@ -258,6 +261,7 @@ const (
SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll
SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl
SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci
SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can
SizeofLinger = C.sizeof_struct_linger SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPMreqn = C.sizeof_struct_ip_mreqn SizeofIPMreqn = C.sizeof_struct_ip_mreqn


@ -190,6 +190,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -190,6 +190,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -186,6 +186,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -196,6 +196,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -197,6 +197,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x8000 BS1 = 0x8000
BSDLY = 0x8000 BSDLY = 0x8000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0xff CBAUD = 0xff
CBAUDEX = 0x0 CBAUDEX = 0x0
CFLUSH = 0xf CFLUSH = 0xf


@ -196,6 +196,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x8000 BS1 = 0x8000
BSDLY = 0x8000 BSDLY = 0x8000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0xff CBAUD = 0xff
CBAUDEX = 0x0 CBAUDEX = 0x0
CFLUSH = 0xf CFLUSH = 0xf


@ -201,6 +201,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -205,6 +205,25 @@ const (
BS0 = 0x0 BS0 = 0x0
BS1 = 0x2000 BS1 = 0x2000
BSDLY = 0x2000 BSDLY = 0x2000
CAN_BCM = 0x2
CAN_EFF_FLAG = 0x80000000
CAN_EFF_ID_BITS = 0x1d
CAN_EFF_MASK = 0x1fffffff
CAN_ERR_FLAG = 0x20000000
CAN_ERR_MASK = 0x1fffffff
CAN_INV_FILTER = 0x20000000
CAN_ISOTP = 0x6
CAN_MAX_DLC = 0x8
CAN_MAX_DLEN = 0x8
CAN_MCNET = 0x5
CAN_MTU = 0x10
CAN_NPROTO = 0x7
CAN_RAW = 0x1
CAN_RTR_FLAG = 0x40000000
CAN_SFF_ID_BITS = 0xb
CAN_SFF_MASK = 0x7ff
CAN_TP16 = 0x3
CAN_TP20 = 0x4
CBAUD = 0x100f CBAUD = 0x100f
CBAUDEX = 0x1000 CBAUDEX = 0x1000
CFLUSH = 0xf CFLUSH = 0xf


@ -203,6 +203,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -326,6 +333,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -205,6 +205,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]uint8 Data [14]uint8
@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -331,6 +338,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]uint8 Data [14]uint8
@ -332,6 +339,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]uint8 Data [14]uint8
@ -332,6 +339,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -211,6 +211,13 @@ type RawSockaddrHCI struct {
Channel uint16 Channel uint16
} }
type RawSockaddrCAN struct {
Family uint16
Pad_cgo_0 [2]byte
Ifindex int32
Addr [8]byte
}
type RawSockaddr struct { type RawSockaddr struct {
Family uint16 Family uint16
Data [14]int8 Data [14]int8
@ -335,6 +342,7 @@ const (
SizeofSockaddrLinklayer = 0x14 SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6 SizeofSockaddrHCI = 0x6
SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8 SizeofLinger = 0x8
SizeofIPMreq = 0x8 SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc SizeofIPMreqn = 0xc


@ -1,11 +1,11 @@
{ {
"kind": "discovery#restDescription", "kind": "discovery#restDescription",
"etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/G3kZz5Dv92Y-2NZwaNrcr5jwm4A\"", "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/sMgjc4eoIFjgub4daTU-MGW0WMA\"",
"discoveryVersion": "v1", "discoveryVersion": "v1",
"id": "storage:v1", "id": "storage:v1",
"name": "storage", "name": "storage",
"version": "v1", "version": "v1",
"revision": "20161019", "revision": "20161109",
"title": "Cloud Storage JSON API", "title": "Cloud Storage JSON API",
"description": "Stores and retrieves potentially large, immutable data objects.", "description": "Stores and retrieves potentially large, immutable data objects.",
"ownerDomain": "google.com", "ownerDomain": "google.com",
@ -685,6 +685,11 @@
"description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
"format": "date-time" "format": "date-time"
}, },
"timeStorageClassUpdated": {
"type": "string",
"description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
"format": "date-time"
},
"updated": { "updated": {
"type": "string", "type": "string",
"description": "The modification time of the object metadata in RFC 3339 format.", "description": "The modification time of the object metadata in RFC 3339 format.",


@ -1034,6 +1034,11 @@ type Object struct {
// deleted. // deleted.
TimeDeleted string `json:"timeDeleted,omitempty"` TimeDeleted string `json:"timeDeleted,omitempty"`
// TimeStorageClassUpdated: The time at which the object's storage class
// was last changed. When the object is initially created, it will be
// set to timeCreated.
TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
// Updated: The modification time of the object metadata in RFC 3339 // Updated: The modification time of the object metadata in RFC 3339
// format. // format.
Updated string `json:"updated,omitempty"` Updated string `json:"updated,omitempty"`
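
As a quick illustration of the new metadata field, the value is an RFC 3339 timestamp like the neighbouring timeCreated/updated fields. A minimal sketch follows; the local struct and sample JSON are hypothetical stand-ins for the generated Object type, used only to show the decoding.

// Minimal sketch: decode timeStorageClassUpdated from object metadata JSON.
// The objectTimes struct and payload below are illustrative stand-ins, not
// the generated storage/v1 client.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"
)

type objectTimes struct {
	TimeCreated             string `json:"timeCreated,omitempty"`
	TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
	Updated                 string `json:"updated,omitempty"`
}

func main() {
	payload := []byte(`{
		"timeCreated": "2016-11-01T12:00:00Z",
		"timeStorageClassUpdated": "2016-11-20T08:30:00Z",
		"updated": "2016-11-21T09:00:00Z"
	}`)

	var o objectTimes
	if err := json.Unmarshal(payload, &o); err != nil {
		log.Fatal(err)
	}

	// Per the schema, this equals timeCreated until the storage class changes.
	t, err := time.Parse(time.RFC3339, o.TimeStorageClassUpdated)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("storage class last changed:", t.UTC())
}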


@ -82,7 +82,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK { if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases. // TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary. // Fix the order if necessary.
stats.Handle(ctx, inPayload) stats.HandleRPC(ctx, inPayload)
} }
c.trailerMD = stream.Trailer() c.trailerMD = stream.Trailer()
return nil return nil
@ -121,7 +121,7 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd
err = t.Write(stream, outBuf, opts) err = t.Write(stream, outBuf, opts)
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
outPayload.SentTime = time.Now() outPayload.SentTime = time.Now()
stats.Handle(ctx, outPayload) stats.HandleRPC(ctx, outPayload)
} }
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
@ -172,12 +172,13 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}() }()
} }
if stats.On() { if stats.On() {
ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
begin := &stats.Begin{ begin := &stats.Begin{
Client: true, Client: true,
BeginTime: time.Now(), BeginTime: time.Now(),
FailFast: c.failFast, FailFast: c.failFast,
} }
stats.Handle(ctx, begin) stats.HandleRPC(ctx, begin)
} }
defer func() { defer func() {
if stats.On() { if stats.On() {
@ -186,7 +187,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
EndTime: time.Now(), EndTime: time.Now(),
Error: e, Error: e,
} }
stats.Handle(ctx, end) stats.HandleRPC(ctx, end)
} }
}() }()
topts := &transport.Options{ topts := &transport.Options{


@ -583,7 +583,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
err = t.Write(stream, p, opts) err = t.Write(stream, p, opts)
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
outPayload.SentTime = time.Now() outPayload.SentTime = time.Now()
stats.Handle(stream.Context(), outPayload) stats.HandleRPC(stream.Context(), outPayload)
} }
return err return err
} }
@ -593,7 +593,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
begin := &stats.Begin{ begin := &stats.Begin{
BeginTime: time.Now(), BeginTime: time.Now(),
} }
stats.Handle(stream.Context(), begin) stats.HandleRPC(stream.Context(), begin)
} }
defer func() { defer func() {
if stats.On() { if stats.On() {
@ -603,7 +603,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
end.Error = toRPCErr(err) end.Error = toRPCErr(err)
} }
stats.Handle(stream.Context(), end) stats.HandleRPC(stream.Context(), end)
} }
}() }()
if trInfo != nil { if trInfo != nil {
@ -698,7 +698,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
inPayload.Payload = v inPayload.Payload = v
inPayload.Data = req inPayload.Data = req
inPayload.Length = len(req) inPayload.Length = len(req)
stats.Handle(stream.Context(), inPayload) stats.HandleRPC(stream.Context(), inPayload)
} }
if trInfo != nil { if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
@ -759,7 +759,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
begin := &stats.Begin{ begin := &stats.Begin{
BeginTime: time.Now(), BeginTime: time.Now(),
} }
stats.Handle(stream.Context(), begin) stats.HandleRPC(stream.Context(), begin)
} }
defer func() { defer func() {
if stats.On() { if stats.On() {
@ -769,7 +769,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
end.Error = toRPCErr(err) end.Error = toRPCErr(err)
} }
stats.Handle(stream.Context(), end) stats.HandleRPC(stream.Context(), end)
} }
}() }()
if s.opts.cp != nil { if s.opts.cp != nil {

vendor/google.golang.org/grpc/stats/handlers.go (new file, 152 lines, generated, vendored)

@ -0,0 +1,152 @@
/*
*
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
package stats
import (
"net"
"sync/atomic"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
)
// ConnTagInfo defines the relevant information needed by connection context tagger.
type ConnTagInfo struct {
// RemoteAddr is the remote address of the corresponding connection.
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
// TODO add QOS related fields.
}
// RPCTagInfo defines the relevant information needed by RPC context tagger.
type RPCTagInfo struct {
// FullMethodName is the RPC method in the format of /package.service/method.
FullMethodName string
}
var (
on = new(int32)
rpcHandler func(context.Context, RPCStats)
connHandler func(context.Context, ConnStats)
connTagger func(context.Context, *ConnTagInfo) context.Context
rpcTagger func(context.Context, *RPCTagInfo) context.Context
)
// HandleRPC processes the RPC stats using the rpc handler registered by the user.
func HandleRPC(ctx context.Context, s RPCStats) {
if rpcHandler == nil {
return
}
rpcHandler(ctx, s)
}
// RegisterRPCHandler registers the user handler function for RPC stats processing.
// It should be called only once; if it is called multiple times, the most recently registered handler takes effect.
// This handler function will be called to process the rpc stats.
func RegisterRPCHandler(f func(context.Context, RPCStats)) {
rpcHandler = f
}
// HandleConn processes the stats using the call back function registered by user.
func HandleConn(ctx context.Context, s ConnStats) {
if connHandler == nil {
return
}
connHandler(ctx, s)
}
// RegisterConnHandler registers the user handler function for conn stats.
// It should be called only once; if it is called multiple times, the most recently registered handler takes effect.
// This handler function will be called to process the conn stats.
func RegisterConnHandler(f func(context.Context, ConnStats)) {
connHandler = f
}
// TagConn calls user registered connection context tagger.
func TagConn(ctx context.Context, info *ConnTagInfo) context.Context {
if connTagger == nil {
return ctx
}
return connTagger(ctx, info)
}
// RegisterConnTagger registers the user connection context tagger function.
// The connection context tagger can attach some information to the given context.
// The returned context will be used for stats handling.
// For conn stats handling, the context used in connHandler for this
// connection will be derived from the context returned.
// For RPC stats handling,
// - On server side, the context used in rpcHandler for all RPCs on this
// connection will be derived from the context returned.
// - On client side, the context is not derived from the context returned.
func RegisterConnTagger(t func(context.Context, *ConnTagInfo) context.Context) {
connTagger = t
}
// TagRPC calls the user registered RPC context tagger.
func TagRPC(ctx context.Context, info *RPCTagInfo) context.Context {
if rpcTagger == nil {
return ctx
}
return rpcTagger(ctx, info)
}
// RegisterRPCTagger registers the user RPC context tagger function.
// The RPC context tagger can attach some information to the given context.
// The context used in stats rpcHandler for this RPC will be derived from the
// context returned.
func RegisterRPCTagger(t func(context.Context, *RPCTagInfo) context.Context) {
rpcTagger = t
}
// Start starts the stats collection and processing if there is a registered stats handler.
func Start() {
if rpcHandler == nil && connHandler == nil {
grpclog.Println("rpcHandler and connHandler are both nil when starting stats. Stats is not started")
return
}
atomic.StoreInt32(on, 1)
}
// Stop stops the stats collection and processing.
// Stop does not unregister the handlers.
func Stop() {
atomic.StoreInt32(on, 0)
}
// On indicates whether the stats collection and processing is on.
func On() bool {
return atomic.CompareAndSwapInt32(on, 1, 1)
}
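
A minimal sketch of how application code would hook into this new API, using only the functions defined in this file; the logging bodies are illustrative.

// Minimal sketch: register an RPC tagger and handler with the API above,
// then enable collection. The log statements are illustrative.
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

func main() {
	stats.RegisterRPCTagger(func(ctx context.Context, info *stats.RPCTagInfo) context.Context {
		// The returned context is the one later passed to the RPC handler.
		log.Printf("tagging RPC %s", info.FullMethodName)
		return ctx
	})
	stats.RegisterRPCHandler(func(ctx context.Context, s stats.RPCStats) {
		log.Printf("RPC stat %T (client=%v)", s, s.IsClient())
	})

	// Collection stays off until Start is called; Stop turns it off again
	// without unregistering the handlers.
	stats.Start()
	defer stats.Stop()

	// ... build a grpc.Server or grpc.ClientConn as usual; the transport
	// calls TagRPC/HandleRPC and TagConn/HandleConn internally.
}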


@ -38,16 +38,12 @@ package stats
import ( import (
"net" "net"
"sync/atomic"
"time" "time"
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
) )
// RPCStats contains stats information about RPCs. // RPCStats contains stats information about RPCs.
// All stats types in this package implements this interface.
type RPCStats interface { type RPCStats interface {
isRPCStats()
// IsClient returns true if this RPCStats is from client side. // IsClient returns true if this RPCStats is from client side.
IsClient() bool IsClient() bool
} }
@ -66,6 +62,8 @@ type Begin struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
// InPayload contains the information for an incoming payload. // InPayload contains the information for an incoming payload.
type InPayload struct { type InPayload struct {
// Client is true if this InPayload is from client side. // Client is true if this InPayload is from client side.
@ -85,6 +83,8 @@ type InPayload struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received. // InHeader contains stats when a header is received.
// FullMethod, addresses and Compression are only valid if Client is false. // FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct { type InHeader struct {
@ -106,6 +106,8 @@ type InHeader struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
// InTrailer contains stats when a trailer is received. // InTrailer contains stats when a trailer is received.
type InTrailer struct { type InTrailer struct {
// Client is true if this InTrailer is from client side. // Client is true if this InTrailer is from client side.
@ -117,6 +119,8 @@ type InTrailer struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
// OutPayload contains the information for an outgoing payload. // OutPayload contains the information for an outgoing payload.
type OutPayload struct { type OutPayload struct {
// Client is true if this OutPayload is from client side. // Client is true if this OutPayload is from client side.
@ -136,6 +140,8 @@ type OutPayload struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent. // OutHeader contains stats when a header is sent.
// FullMethod, addresses and Compression are only valid if Client is true. // FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct { type OutHeader struct {
@ -157,6 +163,8 @@ type OutHeader struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
// OutTrailer contains stats when a trailer is sent. // OutTrailer contains stats when a trailer is sent.
type OutTrailer struct { type OutTrailer struct {
// Client is true if this OutTrailer is from client side. // Client is true if this OutTrailer is from client side.
@ -168,6 +176,8 @@ type OutTrailer struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
// End contains stats when an RPC ends. // End contains stats when an RPC ends.
type End struct { type End struct {
// Client is true if this End is from client side. // Client is true if this End is from client side.
@ -181,39 +191,33 @@ type End struct {
// IsClient indicates if this is from client side. // IsClient indicates if this is from client side.
func (s *End) IsClient() bool { return s.Client } func (s *End) IsClient() bool { return s.Client }
var ( func (s *End) isRPCStats() {}
on = new(int32)
handler func(context.Context, RPCStats)
)
// On indicates whether stats is started. // ConnStats contains stats information about connections.
func On() bool { type ConnStats interface {
return atomic.CompareAndSwapInt32(on, 1, 1) isConnStats()
// IsClient returns true if this ConnStats is from client side.
IsClient() bool
} }
// Handle processes the stats using the call back function registered by user. // ConnBegin contains the stats of a connection when it is established.
func Handle(ctx context.Context, s RPCStats) { type ConnBegin struct {
handler(ctx, s) // Client is true if this ConnBegin is from client side.
Client bool
} }
// RegisterHandler registers the user handler function. // IsClient indicates if this is from client side.
// If another handler was registered before, this new handler will overwrite the old one. func (s *ConnBegin) IsClient() bool { return s.Client }
// This handler function will be called to process the stats.
func RegisterHandler(f func(context.Context, RPCStats)) { func (s *ConnBegin) isConnStats() {}
handler = f
// ConnEnd contains the stats of a connection when it ends.
type ConnEnd struct {
// Client is true if this ConnEnd is from client side.
Client bool
} }
// Start starts the stats collection and reporting if there is a registered stats handle. // IsClient indicates if this is from client side.
func Start() { func (s *ConnEnd) IsClient() bool { return s.Client }
if handler == nil {
grpclog.Println("handler is nil when starting stats. Stats is not started")
return
}
atomic.StoreInt32(on, 1)
}
// Stop stops the stats collection and processing. func (s *ConnEnd) isConnStats() {}
// Stop does not unregister handler.
func Stop() {
atomic.StoreInt32(on, 0)
}


@ -145,12 +145,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}() }()
} }
if stats.On() { if stats.On() {
ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
begin := &stats.Begin{ begin := &stats.Begin{
Client: true, Client: true,
BeginTime: time.Now(), BeginTime: time.Now(),
FailFast: c.failFast, FailFast: c.failFast,
} }
stats.Handle(ctx, begin) stats.HandleRPC(ctx, begin)
} }
defer func() { defer func() {
if err != nil && stats.On() { if err != nil && stats.On() {
@ -159,7 +160,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
Client: true, Client: true,
Error: err, Error: err,
} }
stats.Handle(ctx, end) stats.HandleRPC(ctx, end)
} }
}() }()
gopts := BalancerGetOptions{ gopts := BalancerGetOptions{
@ -342,7 +343,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
err = cs.t.Write(cs.s, out, &transport.Options{Last: false}) err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
if err == nil && outPayload != nil { if err == nil && outPayload != nil {
outPayload.SentTime = time.Now() outPayload.SentTime = time.Now()
stats.Handle(cs.statsCtx, outPayload) stats.HandleRPC(cs.statsCtx, outPayload)
} }
return err return err
} }
@ -360,7 +361,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
if err != io.EOF { if err != io.EOF {
end.Error = toRPCErr(err) end.Error = toRPCErr(err)
} }
stats.Handle(cs.statsCtx, end) stats.HandleRPC(cs.statsCtx, end)
} }
}() }()
var inPayload *stats.InPayload var inPayload *stats.InPayload
@ -385,7 +386,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
cs.mu.Unlock() cs.mu.Unlock()
} }
if inPayload != nil { if inPayload != nil {
stats.Handle(cs.statsCtx, inPayload) stats.HandleRPC(cs.statsCtx, inPayload)
} }
if !cs.desc.ClientStreams || cs.desc.ServerStreams { if !cs.desc.ClientStreams || cs.desc.ServerStreams {
return return
@ -565,7 +566,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
} }
if outPayload != nil { if outPayload != nil {
outPayload.SentTime = time.Now() outPayload.SentTime = time.Now()
stats.Handle(ss.s.Context(), outPayload) stats.HandleRPC(ss.s.Context(), outPayload)
} }
return nil return nil
} }
@ -599,7 +600,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
return toRPCErr(err) return toRPCErr(err)
} }
if inPayload != nil { if inPayload != nil {
stats.Handle(ss.s.Context(), inPayload) stats.HandleRPC(ss.s.Context(), inPayload)
} }
return nil return nil
} }


@ -111,35 +111,9 @@ func newQuotaPool(q int) *quotaPool {
return qb return qb
} }
// add adds n to the available quota and tries to send it on acquire. // add cancels the pending quota sent on acquire, increments it by v and sends
func (qb *quotaPool) add(n int) {
qb.mu.Lock()
defer qb.mu.Unlock()
qb.quota += n
if qb.quota <= 0 {
return
}
select {
case qb.c <- qb.quota:
qb.quota = 0
default:
}
}
// cancel cancels the pending quota sent on acquire, if any.
func (qb *quotaPool) cancel() {
qb.mu.Lock()
defer qb.mu.Unlock()
select {
case n := <-qb.c:
qb.quota += n
default:
}
}
// reset cancels the pending quota sent on acquired, incremented by v and sends
// it back on acquire. // it back on acquire.
func (qb *quotaPool) reset(v int) { func (qb *quotaPool) add(v int) {
qb.mu.Lock() qb.mu.Lock()
defer qb.mu.Unlock() defer qb.mu.Unlock()
select { select {
@ -151,6 +125,10 @@ func (qb *quotaPool) reset(v int) {
if qb.quota <= 0 { if qb.quota <= 0 {
return return
} }
// After the pool has been created, this is the only place that sends on
// the channel. Since mu is held at this point and any quota that was sent
// on the channel has been retrieved, we know that this code will always
// place any positive quota value on the channel.
select { select {
case qb.c <- qb.quota: case qb.c <- qb.quota:
qb.quota = 0 qb.quota = 0
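
Because add now folds in the old cancel/reset behaviour, a self-contained sketch of the resulting pattern may help when reading the transport changes below; this is an illustrative standalone type, not the vendored quotaPool itself.

// Illustrative standalone sketch of the merged add() semantics: reclaim any
// quota already sitting on the channel, fold in v, and re-send a positive
// balance. Not the vendored grpc/transport code.
package main

import (
	"fmt"
	"sync"
)

type quotaPool struct {
	c     chan int
	mu    sync.Mutex
	quota int
}

func newQuotaPool(q int) *quotaPool {
	qp := &quotaPool{c: make(chan int, 1)}
	if q > 0 {
		qp.c <- q
	} else {
		qp.quota = q
	}
	return qp
}

func (qp *quotaPool) add(v int) {
	qp.mu.Lock()
	defer qp.mu.Unlock()
	// Reclaim any pending quota so the channel is empty while mu is held.
	select {
	case n := <-qp.c:
		qp.quota += n
	default:
	}
	qp.quota += v
	if qp.quota <= 0 {
		return
	}
	// The channel was just drained under mu, so this send cannot block.
	qp.c <- qp.quota
	qp.quota = 0
}

func (qp *quotaPool) acquire() <-chan int { return qp.c }

func main() {
	qp := newQuotaPool(10)
	got := <-qp.acquire() // take all available quota
	qp.add(got - 3)       // consume 3, return the rest
	fmt.Println(<-qp.acquire()) // prints 7
}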


@ -56,6 +56,7 @@ import (
// http2Client implements the ClientTransport interface with HTTP2. // http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct { type http2Client struct {
ctx context.Context
target string // server name/addr target string // server name/addr
userAgent string userAgent string
md interface{} md interface{}
@ -181,6 +182,7 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
} }
var buf bytes.Buffer var buf bytes.Buffer
t := &http2Client{ t := &http2Client{
ctx: ctx,
target: addr.Addr, target: addr.Addr,
userAgent: ua, userAgent: ua,
md: addr.Metadata, md: addr.Metadata,
@ -242,6 +244,16 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
} }
go t.controller() go t.controller()
t.writableChan <- 0 t.writableChan <- 0
if stats.On() {
t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{
Client: true,
}
stats.HandleConn(t.ctx, connBegin)
}
return t, nil return t, nil
} }
@ -367,7 +379,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
} }
t.mu.Unlock() t.mu.Unlock()
if reset { if reset {
t.streamsQuota.reset(-1) t.streamsQuota.add(-1)
} }
// HPACK encodes various headers. Note that once WriteField(...) is // HPACK encodes various headers. Note that once WriteField(...) is
@ -467,7 +479,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
LocalAddr: t.localAddr, LocalAddr: t.localAddr,
Compression: callHdr.SendCompress, Compression: callHdr.SendCompress,
} }
stats.Handle(s.clientStatsCtx, outHeader) stats.HandleRPC(s.clientStatsCtx, outHeader)
} }
t.writableChan <- 0 t.writableChan <- 0
return s, nil return s, nil
@ -547,6 +559,12 @@ func (t *http2Client) Close() (err error) {
s.mu.Unlock() s.mu.Unlock()
s.write(recvMsg{err: ErrConnClosing}) s.write(recvMsg{err: ErrConnClosing})
} }
if stats.On() {
connEnd := &stats.ConnEnd{
Client: true,
}
stats.HandleConn(t.ctx, connEnd)
}
return return
} }
@ -604,19 +622,14 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
var p []byte var p []byte
if r.Len() > 0 { if r.Len() > 0 {
size := http2MaxFrameLen size := http2MaxFrameLen
s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data. // Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil { if err != nil {
return err return err
} }
t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data. // Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil { if err != nil {
if _, ok := err.(StreamError); ok || err == io.EOF {
t.sendQuotaPool.cancel()
}
return err return err
} }
if sq < size { if sq < size {
@ -904,13 +917,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
Client: true, Client: true,
WireLength: int(frame.Header().Length), WireLength: int(frame.Header().Length),
} }
stats.Handle(s.clientStatsCtx, inHeader) stats.HandleRPC(s.clientStatsCtx, inHeader)
} else { } else {
inTrailer := &stats.InTrailer{ inTrailer := &stats.InTrailer{
Client: true, Client: true,
WireLength: int(frame.Header().Length), WireLength: int(frame.Header().Length),
} }
stats.Handle(s.clientStatsCtx, inTrailer) stats.HandleRPC(s.clientStatsCtx, inTrailer)
} }
} }
}() }()
@ -1035,13 +1048,13 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
t.maxStreams = int(s.Val) t.maxStreams = int(s.Val)
t.mu.Unlock() t.mu.Unlock()
if reset { if reset {
t.streamsQuota.reset(int(s.Val) - ms) t.streamsQuota.add(int(s.Val) - ms)
} }
case http2.SettingInitialWindowSize: case http2.SettingInitialWindowSize:
t.mu.Lock() t.mu.Lock()
for _, stream := range t.activeStreams { for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream. // Adjust the sending quota for each stream.
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
} }
t.streamSendQuota = s.Val t.streamSendQuota = s.Val
t.mu.Unlock() t.mu.Unlock()


@ -60,6 +60,7 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2. // http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct { type http2Server struct {
ctx context.Context
conn net.Conn conn net.Conn
remoteAddr net.Addr remoteAddr net.Addr
localAddr net.Addr localAddr net.Addr
@ -127,6 +128,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
} }
var buf bytes.Buffer var buf bytes.Buffer
t := &http2Server{ t := &http2Server{
ctx: context.Background(),
conn: conn, conn: conn,
remoteAddr: conn.RemoteAddr(), remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(), localAddr: conn.LocalAddr(),
@ -145,6 +147,14 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
activeStreams: make(map[uint32]*Stream), activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize, streamSendQuota: defaultWindowSize,
} }
if stats.On() {
t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
stats.HandleConn(t.ctx, connBegin)
}
go t.controller() go t.controller()
t.writableChan <- 0 t.writableChan <- 0
return t, nil return t, nil
@ -177,9 +187,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} }
s.recvCompress = state.encoding s.recvCompress = state.encoding
if state.timeoutSet { if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
} else { } else {
s.ctx, s.cancel = context.WithCancel(context.TODO()) s.ctx, s.cancel = context.WithCancel(t.ctx)
} }
pr := &peer.Peer{ pr := &peer.Peer{
Addr: t.remoteAddr, Addr: t.remoteAddr,
@ -241,6 +251,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
} }
s.ctx = traceCtx(s.ctx, s.method) s.ctx = traceCtx(s.ctx, s.method)
if stats.On() { if stats.On() {
s.ctx = stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{ inHeader := &stats.InHeader{
FullMethod: s.method, FullMethod: s.method,
RemoteAddr: t.remoteAddr, RemoteAddr: t.remoteAddr,
@ -248,7 +259,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
Compression: s.recvCompress, Compression: s.recvCompress,
WireLength: int(frame.Header().Length), WireLength: int(frame.Header().Length),
} }
stats.Handle(s.ctx, inHeader) stats.HandleRPC(s.ctx, inHeader)
} }
handle(s) handle(s)
return return
@ -533,7 +544,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
outHeader := &stats.OutHeader{ outHeader := &stats.OutHeader{
WireLength: bufLen, WireLength: bufLen,
} }
stats.Handle(s.Context(), outHeader) stats.HandleRPC(s.Context(), outHeader)
} }
t.writableChan <- 0 t.writableChan <- 0
return nil return nil
@ -596,7 +607,7 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
outTrailer := &stats.OutTrailer{ outTrailer := &stats.OutTrailer{
WireLength: bufLen, WireLength: bufLen,
} }
stats.Handle(s.Context(), outTrailer) stats.HandleRPC(s.Context(), outTrailer)
} }
t.closeStream(s) t.closeStream(s)
t.writableChan <- 0 t.writableChan <- 0
@ -626,19 +637,14 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
return nil return nil
} }
size := http2MaxFrameLen size := http2MaxFrameLen
s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data. // Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil { if err != nil {
return err return err
} }
t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data. // Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil { if err != nil {
if _, ok := err.(StreamError); ok {
t.sendQuotaPool.cancel()
}
return err return err
} }
if sq < size { if sq < size {
@ -706,7 +712,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
t.mu.Lock() t.mu.Lock()
defer t.mu.Unlock() defer t.mu.Unlock()
for _, stream := range t.activeStreams { for _, stream := range t.activeStreams {
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
} }
t.streamSendQuota = s.Val t.streamSendQuota = s.Val
} }
@ -783,6 +789,10 @@ func (t *http2Server) Close() (err error) {
for _, s := range streams { for _, s := range streams {
s.cancel() s.cancel()
} }
if stats.On() {
connEnd := &stats.ConnEnd{}
stats.HandleConn(t.ctx, connEnd)
}
return return
} }
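
Tying the transport changes back to the new stats API: a minimal sketch of a connection tagger/handler pair, showing how the context returned by TagConn (now stored in t.ctx above) flows into HandleConn. The context key and log output are illustrative.

// Minimal sketch: a conn tagger derives a context carrying the remote
// address; the conn handler reads it back. Key and logging are illustrative.
package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

type remoteAddrKey struct{}

func main() {
	stats.RegisterConnTagger(func(ctx context.Context, info *stats.ConnTagInfo) context.Context {
		// The returned context is what HandleConn (and, on the server,
		// the per-RPC handlers) will see for this connection.
		return context.WithValue(ctx, remoteAddrKey{}, info.RemoteAddr)
	})
	stats.RegisterConnHandler(func(ctx context.Context, s stats.ConnStats) {
		log.Printf("conn event %T from %v (client=%v)",
			s, ctx.Value(remoteAddrKey{}), s.IsClient())
	})
	stats.Start()
}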