
vendor: update all dependencies to latest versions

Nick Craig-Wood 2017-09-30 15:27:27 +01:00
parent 911d121bb9
commit b017fcfe9a
3048 changed files with 537057 additions and 189681 deletions

Gopkg.lock generated

@ -10,20 +10,20 @@
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "a5913b3f7deecba45e98ff33cefbac4fd204ddd7"
version = "v0.10.0"
revision = "f6de2c509ed9d2af648c3c147207eaaf97149aed"
version = "v0.14.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["storage"]
revision = "5b6066bbd213e47c49a5fa2be2b29529bbcb6704"
version = "v10.1.1-beta"
revision = "2592daf71ab6b95dcfc7f7437ecc1afb9ddb7360"
version = "v11.0.0-beta"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "10791a4516e77c53ab10f198f144804cca3d5b43"
version = "v8.1.0"
revision = "f6be1abbb5abd0517522f850dd785990d373da7e"
version = "v8.4.0"
[[projects]]
branch = "master"
@ -35,31 +35,31 @@
branch = "master"
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "4cc8cc5a2a44f01d31b303a7280e20e00a6eafdb"
revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "fb478f41c87d959e328f2eac0c1b40f17a2f3e00"
revision = "4b871cd428eeab064e45ff2bf65054bb7eb10d6c"
[[projects]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
revision = "bd544a64249f735bcf394ab0c4fa8bec9b31f221"
revision = "5a2026bfb28e86839f9fcc46523850319399006c"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "35bcf037030dcadcd247618c75c00c6cd17482d7"
version = "v1.0.2"
revision = "3a24389863c5bf906de391226ee8c4ec2c925bfe"
version = "v1.0.3"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
version = "v1.0.6"
revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
version = "v1.0.7"
[[projects]]
name = "github.com/davecgh/go-spew"
@ -82,14 +82,14 @@
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "d3de07a94d22b4a0972deb4b96d790c2c0ce8333"
version = "v1.28.0"
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
version = "v1.28.2"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "0a4f71a498b7c4812f64969510bcb4eca251e33a"
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
[[projects]]
branch = "master"
@ -98,22 +98,21 @@
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
branch = "master"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "769512c448b98e9efa243279a7e281248332aa98"
revision = "299b7ff5b6096588cceca2edc1fc9f557002fb85"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
version = "0.2.2"
revision = "0b12d6b5"
[[projects]]
branch = "master"
@ -137,7 +136,7 @@
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "5068c3506cf003c630c94b92a64e978115394f26"
revision = "9d3f812e23d270d1c66a9a01e20af1005061cdc4"
[[projects]]
branch = "master"
@ -155,13 +154,13 @@
branch = "master"
name = "github.com/pkg/errors"
packages = ["."]
revision = "c605e284fe17294bda444b34710735b29d1a9d90"
revision = "2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb"
[[projects]]
branch = "master"
name = "github.com/pkg/sftp"
packages = ["."]
revision = "4f3e725e885c021085d2fb8a9cc26e30ea1a992f"
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
@ -173,13 +172,13 @@
branch = "master"
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "da627cc50b6fb2eb623eaffe91fb29d7eddfd06a"
revision = "7c8316a9cb0a6af865265f899f5de6aadb31a24b"
[[projects]]
name = "github.com/russross/blackfriday"
packages = ["."]
revision = "0b647d0506a698cca42caca173e55559b12a69f2"
version = "v1.4"
revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
version = "v1.5"
[[projects]]
name = "github.com/satori/uuid"
@ -187,17 +186,11 @@
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/shurcooL/sanitized_anchor_name"
packages = ["."]
revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "a3f95b5c423586578a4e099b11a46c2479628cac"
version = "1.0.2"
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
version = "v1.0.3"
[[projects]]
branch = "master"
@ -209,7 +202,7 @@
branch = "master"
name = "github.com/spf13/cobra"
packages = [".","doc"]
revision = "2df9a531813370438a4d79bfc33e21f58063ed87"
revision = "e5f66de850af3302fbe378c8acded2b0fa55472c"
[[projects]]
branch = "master"
@ -221,7 +214,7 @@
branch = "master"
name = "github.com/stretchr/testify"
packages = ["assert","require"]
revision = "05e8a0eda380579888eb53c394909df027f06991"
revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"
[[projects]]
branch = "master"
@ -233,49 +226,49 @@
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"]
revision = "4749bc5ebe4857b353e55b49ebe91eed9a7f6cda"
revision = "088fbd27bd49adf215d02a05c36c5ac2d243d1f1"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
revision = "6914964337150723782436d56b3f21610a74ce7b"
revision = "76eec36fa14229c4b25bb894c2d0e591527af429"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","html","html/atom"]
revision = "ab5485076ff3407ad2d02db054635913f017b0ed"
revision = "0a9397675ba34b2845f758fe3cd68828369c6517"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "b53b38ad8a6435bd399ea76d0fa74f23149cca4e"
revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "02a66801d979d706a4b445e24e03916eeaa2a404"
packages = ["unix","windows"]
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
revision = "836efe42bb4aa16aaa17b9c155d8813d336ed720"
revision = "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "8be79e1e0910c292df4e79c241bb7e8f7e725959"
revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
[[projects]]
branch = "master"
name = "google.golang.org/api"
packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
revision = "295e4bb0ade057ae2cfb9876ab0b54635dbfcea4"
revision = "906273f42cdebd65de3a53f30dd9e23de1b55ba9"
[[projects]]
name = "google.golang.org/appengine"
@ -287,7 +280,7 @@
branch = "v2"
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "25c4ec802a7d637f88d584ab26798e94ad14c13b"
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[solve-meta]
analyzer-name = "dep"

vendor/cloud.google.com/go/MIGRATION.md generated vendored Normal file

@ -0,0 +1,54 @@
# Code Changes
## v0.10.0
- pubsub: Replace
```
sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
```
with
```
sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
})
```
- trace: trace.GRPCServerInterceptor will be provided from *trace.Client.
Given an initialized `*trace.Client` named `tc`, instead of
```
s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
```
write
```
s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
```
- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
Instead of
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
```
write
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
```
- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
interceptor as a dial option as shown below when initializing Cloud package
clients:
```
c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
if err != nil {
...
}
```

vendor/cloud.google.com/go/README.md generated vendored

@ -33,21 +33,110 @@ make backwards-incompatible changes.
## News
_March 17, 2017_
_September 28, 2017_
Breaking Pubsub changes.
* Publish is now asynchronous
([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
* Message.Done replaced with Message.Ack and Message.Nack.
*v0.14.0*
_February 14, 2017_
- bigquery BREAKING CHANGES:
- Standard SQL is the default for queries and views.
- `Table.Create` takes `TableMetadata` as a second argument, instead of
options.
- `Dataset.Create` takes `DatasetMetadata` as a second argument.
- `DatasetMetadata` field `ID` renamed to `FullID`
- `TableMetadata` field `ID` renamed to `FullID`
Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
- Other bigquery changes:
- The client will append a random suffix to a provided job ID if you set
`AddJobIDSuffix` to true in a job config.
- Listing jobs is supported.
- Better retry logic.
Note that although the Spanner service is beta, the Go client library is alpha.
- vision, language, speech: clients are now stable
- monitoring: client is now beta
- profiler:
- Rename InstanceName to Instance, ZoneName to Zone
- Auto-detect service name and version on AppEngine.
_September 8, 2017_
*v0.13.0*
- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
options to continue using Legacy SQL after the client switches its default
to Standard SQL.
- bigquery: Support for updating dataset labels.
- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
than the client's. DatasetsInProject is no longer needed and is deprecated.
- bigtable: Fail ListInstances when any zones fail.
- spanner: support decoding of slices of basic types (e.g. []string, []int64,
etc.)
- logging/logadmin: UpdateSink no longer creates a sink if it is missing
(actually a change to the underlying service, not the client)
- profiler: Service and ServiceVersion replace Target in Config.
_August 22, 2017_
*v0.12.0*
- pubsub: Subscription.Receive now uses streaming pull.
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
- errors: renamed errorreporting. The errors package will be removed shortly.
- datastore: improved retry behavior.
- bigquery: support updates to dataset metadata, with etags.
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
- bigquery: generate all job IDs on the client.
- storage: support bucket lifecycle configurations.
_July 31, 2017_
*v0.11.0*
- Clients for spanner, pubsub and video are now in beta.
- New client for DLP.
- spanner: performance and testing improvements.
- storage: requester-pays buckets are supported.
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
- pubsub: bug fixes and other minor improvements
_June 17, 2017_
*v0.10.0*
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.
- translation: now stable.
- trace: several changes to the surface. See the link below.
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
@ -61,16 +150,16 @@ Google API | Status | Package
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | beta | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | beta | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| alpha | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errors`][cloud-errors-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
> **Alpha status**: the API is still being actively developed. As a
@ -437,6 +526,6 @@ for more information.
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errors
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials


@ -20,7 +20,7 @@ import (
"fmt"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -51,7 +51,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}


@ -21,9 +21,12 @@ import (
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
// JobID is the ID to use for the copy job. If unset, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Srcs are the tables from which data will be copied.
Srcs []*Table
@ -68,7 +71,9 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) {
for _, t := range c.Srcs {
conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
}
job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
setJobRef(job, c.JobID, c.c.projectID)
job := &bq.Job{
JobReference: createJobRef(c.JobID, c.AddJobIDSuffix, c.c.projectID),
Configuration: &bq.JobConfiguration{Copy: conf},
}
return c.c.insertJob(ctx, &insertJobConf{job: job})
}
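A rough usage sketch of the fields above from client code (the project, dataset, and table names are placeholders, not part of this change): copy one table into another with a caller-chosen job ID prefix and a random suffix, per the JobID and AddJobIDSuffix comments.
```
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	ds := client.Dataset("my_dataset") // placeholder dataset
	// CopierFrom returns a *Copier whose embedded CopyConfig can be
	// adjusted before calling Run.
	copier := ds.Table("dst_table").CopierFrom(ds.Table("src_table"))
	copier.JobID = "copy-src-to-dst" // caller-chosen prefix
	copier.AddJobIDSuffix = true     // a random suffix is appended, keeping the ID unique
	job, err := copier.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started copy job", job.ID())
}
```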


@ -15,7 +15,6 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
@ -24,6 +23,7 @@ import (
func defaultCopyJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Copy: &bq.JobConfigurationTableCopy{
DestinationTable: &bq.TableReference{
@ -44,6 +44,7 @@ func defaultCopyJob() *bq.Job {
}
func TestCopy(t *testing.T) {
defer fixRandomJobID("RANDOM")()
testCases := []struct {
dst *Table
srcs []*Table
@ -105,16 +106,13 @@ func TestCopy(t *testing.T) {
config: CopyConfig{JobID: "job-id"},
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference = &bq.JobReference{
JobId: "job-id",
ProjectId: "client-project-id",
}
j.JobReference.JobId = "job-id"
return j
}(),
},
}
for _, tc := range testCases {
for i, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
@ -126,11 +124,9 @@ func TestCopy(t *testing.T) {
tc.config.Dst = tc.dst
copier.CopyConfig = tc.config
if _, err := copier.Run(context.Background()); err != nil {
t.Errorf("err calling Run: %v", err)
t.Errorf("#%d: err calling Run: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}


@ -1,103 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type createTableRecorder struct {
conf *createTableConf
service
}
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
rec.conf = conf
return nil
}
func TestCreateTableOptions(t *testing.T) {
s := &createTableRecorder{}
c := &Client{
projectID: "p",
service: s,
}
ds := c.Dataset("d")
table := ds.Table("t")
exp := time.Now()
q := "query"
if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want := createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
viewQuery: q,
useStandardSQL: true,
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
// No need for an elaborate schema, that is tested in schema_test.go.
schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
partitionCases := []struct {
timePartitioning TimePartitioning
expectedExpiration time.Duration
}{
{TimePartitioning{}, time.Duration(0)},
{TimePartitioning{time.Second}, time.Second},
}
for _, c := range partitionCases {
if err := table.Create(context.Background(), c.timePartitioning); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
timePartitioning: &TimePartitioning{c.expectedExpiration},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
}
}


@ -17,6 +17,8 @@ package bigquery
import (
"time"
"cloud.google.com/go/internal/optional"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
@ -28,18 +30,57 @@ type Dataset struct {
c *Client
}
// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
CreationTime time.Time
LastModifiedTime time.Time // When the dataset or any of its tables were modified.
DefaultTableExpiration time.Duration
Description string // The user-friendly description of this table.
Name string // The user-friendly name for this table.
ID string
// These fields can be set when creating a dataset.
Name string // The user-friendly name for this dataset.
Description string // The user-friendly description of this dataset.
Location string // The geo location of the dataset.
DefaultTableExpiration time.Duration // The default expiration time for new tables.
Labels map[string]string // User-provided labels.
// These fields are read-only.
CreationTime time.Time
LastModifiedTime time.Time // When the dataset or any of its tables were modified.
FullID string // The full dataset ID in the form projectID:datasetID.
// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
// TODO(jba): access rules
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this dataset.
Name optional.String // The user-friendly name for this dataset.
// DefaultTableExpiration is the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration
setLabels map[string]string
deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified when dm is used
// in a call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
if dm.setLabels == nil {
dm.setLabels = map[string]string{}
}
dm.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted when dm is used in a
// call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
if dm.deleteLabels == nil {
dm.deleteLabels = map[string]bool{}
}
dm.deleteLabels[name] = true
}
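A minimal sketch of combining SetLabel, DeleteLabel and Dataset.Update as defined above, following the integration test later in this commit; the project and dataset names and the label key are placeholders, and passing "" as the etag makes each call a blind write.
```
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	ds := client.Dataset("my_dataset") // placeholder dataset
	// Add (or overwrite) a label. The empty etag makes this a blind write.
	var dm bigquery.DatasetMetadataToUpdate
	dm.SetLabel("env", "test") // placeholder label key/value
	md, err := ds.Update(ctx, dm, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.Labels)
	// Remove the label again.
	dm = bigquery.DatasetMetadataToUpdate{}
	dm.DeleteLabel("env")
	if _, err := ds.Update(ctx, dm, ""); err != nil {
		log.Fatal(err)
	}
}
```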
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
@ -54,10 +95,10 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
}
}
// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID, md)
}
// Delete deletes the dataset.
@ -70,6 +111,14 @@ func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
}
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag)
}
// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
@ -126,17 +175,21 @@ func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
return tok, nil
}
// Datasets returns an iterator over the datasets in the Client's project.
// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
return c.DatasetsInProject(ctx, c.projectID)
}
// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
it := &DatasetIterator{
ctx: ctx,
c: c,
projectID: projectID,
ProjectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
@ -148,18 +201,23 @@ func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *Datas
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
// ListHidden causes hidden datasets to be listed when set to true.
// Set before the first call to Next.
ListHidden bool
// Filter restricts the datasets returned by label. The filter syntax is described in
// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
// Set before the first call to Next.
Filter string
ctx context.Context
projectID string
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
// The project ID of the listed datasets.
// Set before the first call to Next.
ProjectID string
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
}
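A minimal sketch of the new listing pattern described above: setting ProjectID on the iterator replaces the deprecated DatasetsInProject. "other-project" is a placeholder.
```
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	it := client.Datasets(ctx)
	it.ProjectID = "other-project" // placeholder: list another project's datasets
	it.ListHidden = true           // optionally include hidden datasets
	for {
		ds, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(ds.DatasetID)
	}
}
```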
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
@ -175,7 +233,7 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID,
pageSize, pageToken, it.ListHidden, it.Filter)
if err != nil {
return "", err


@ -16,10 +16,11 @@ package bigquery
import (
"errors"
"reflect"
"strings"
"testing"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -94,7 +95,7 @@ func TestErrorFromErrorProto(t *testing.T) {
want: &Error{Location: "L", Message: "M", Reason: "R"},
},
} {
if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
}
}


@ -17,6 +17,7 @@ package bigquery_test
import (
"fmt"
"os"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
@ -233,7 +234,8 @@ func ExampleDataset_Create() {
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Create(ctx); err != nil {
ds := client.Dataset("my_dataset")
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
// TODO: Handle error.
}
}
@ -262,6 +264,44 @@ func ExampleDataset_Metadata() {
fmt.Println(md)
}
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleDataset_Table() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
@ -351,12 +391,13 @@ func ExampleTable_Create() {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx); err != nil {
if err := t.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Create_schema() {
// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
@ -368,7 +409,12 @@ func ExampleTable_Create_schema() {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx, schema); err != nil {
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
ExpirationTime: time.Now().Add(24 * time.Hour),
}); err != nil {
// TODO: Handle error.
}
}
@ -476,6 +522,8 @@ func ExampleTable_LoaderFrom() {
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef)
@ -506,6 +554,8 @@ func ExampleTable_LoaderFrom_reader() {
}
rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs)
@ -534,7 +584,32 @@ func ExampleTable_Read() {
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleTable_Update() {
// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
md, err := t.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := t.Update(ctx,
bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
@ -543,7 +618,7 @@ func ExampleTable_Update() {
t := client.Dataset("my_dataset").Table("my_table")
tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "my favorite table",
})
}, "")
if err != nil {
// TODO: Handle error.
}


@ -21,9 +21,12 @@ import (
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
// JobID is the ID to use for the extract job. If empty, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the table from which data will be extracted.
Src *Table
@ -55,22 +58,23 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
conf := &bq.JobConfigurationExtract{}
job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}}
setJobRef(job, e.JobID, e.c.projectID)
conf.DestinationUris = append([]string{}, e.Dst.uris...)
conf.Compression = string(e.Dst.Compression)
conf.DestinationFormat = string(e.Dst.DestinationFormat)
conf.FieldDelimiter = e.Dst.FieldDelimiter
conf.SourceTable = e.Src.tableRefProto()
var printHeader *bool
if e.DisableHeader {
f := false
conf.PrintHeader = &f
printHeader = &f
}
job := &bq.Job{
JobReference: createJobRef(e.JobID, e.AddJobIDSuffix, e.c.projectID),
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.uris...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.tableRefProto(),
PrintHeader: printHeader,
},
},
}
return e.c.insertJob(ctx, &insertJobConf{job: job})
}
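A rough sketch of driving the rewritten Run from client code; the GCS URI, dataset and table names are placeholders, and DisableHeader corresponds to the PrintHeader handling above.
```
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/extract.csv") // placeholder URI
	extractor := client.Dataset("my_dataset").Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true  // ends up as PrintHeader=false on the job
	extractor.JobID = "extract"     // caller-chosen prefix
	extractor.AddJobIDSuffix = true // a random suffix is appended to the ID
	job, err := extractor.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started extract job", job.ID())
}
```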


@ -15,7 +15,6 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
@ -25,10 +24,11 @@ import (
func defaultExtractJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -39,10 +39,11 @@ func defaultExtractJob() *bq.Job {
}
func TestExtract(t *testing.T) {
defer fixRandomJobID("RANDOM")()
s := &testService{}
c := &Client{
service: s,
projectID: "project-id",
projectID: "client-project-id",
}
testCases := []struct {
@ -86,17 +87,15 @@ func TestExtract(t *testing.T) {
},
}
for _, tc := range testCases {
for i, tc := range testCases {
ext := tc.src.ExtractorTo(tc.dst)
tc.config.Src = ext.Src
tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config
if _, err := ext.Run(context.Background()); err != nil {
t.Errorf("err calling extract: %v", err)
t.Errorf("#%d: err calling extract: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}


@ -15,10 +15,10 @@
package bigquery
import (
"reflect"
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -84,7 +84,7 @@ func TestPopulateLoadConfig(t *testing.T) {
}
got := &bq.JobConfigurationLoad{}
fc.populateLoadConfig(got)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
}
}


@ -21,7 +21,6 @@ import (
"log"
"net/http"
"os"
"reflect"
"sort"
"strings"
"testing"
@ -44,9 +43,13 @@ var (
dataset *Dataset
schema = Schema{
{Name: "name", Type: StringFieldType},
{Name: "num", Type: IntegerFieldType},
{Name: "nums", Type: IntegerFieldType, Repeated: true},
{Name: "rec", Type: RecordFieldType, Schema: Schema{
{Name: "bool", Type: BooleanFieldType},
}},
}
testTableExpiration time.Time
datasetIDs = testutil.NewUIDSpace("dataset")
)
func TestMain(m *testing.M) {
@ -80,13 +83,13 @@ func initIntegrationTest() {
log.Fatalf("NewClient: %v", err)
}
dataset = client.Dataset("bigquery_integration_test")
if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
if err := dataset.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
log.Fatalf("creating dataset: %v", err)
}
testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
}
func TestIntegration_Create(t *testing.T) {
func TestIntegration_TableCreate(t *testing.T) {
// Check that creating a record field with an empty schema is an error.
if client == nil {
t.Skip("Integration tests skipped")
@ -95,7 +98,10 @@ func TestIntegration_Create(t *testing.T) {
schema := Schema{
{Name: "rec", Type: RecordFieldType, Schema: Schema{}},
}
err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute)))
err := table.Create(context.Background(), &TableMetadata{
Schema: schema,
ExpirationTime: time.Now().Add(5 * time.Minute),
})
if err == nil {
t.Fatal("want error, got nil")
}
@ -104,7 +110,7 @@ func TestIntegration_Create(t *testing.T) {
}
}
func TestIntegration_CreateView(t *testing.T) {
func TestIntegration_TableCreateView(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
@ -114,8 +120,12 @@ func TestIntegration_CreateView(t *testing.T) {
// Test that standard SQL views work.
view := dataset.Table("t_view_standardsql")
query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID))
err := view.Create(context.Background(), UseStandardSQL(), query)
query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
dataset.ProjectID, dataset.DatasetID, table.TableID)
err := view.Create(context.Background(), &TableMetadata{
ViewQuery: query,
UseStandardSQL: true,
})
if err != nil {
t.Fatalf("table.create: Did not expect an error, got: %v", err)
}
@ -135,8 +145,8 @@ func TestIntegration_TableMetadata(t *testing.T) {
t.Fatal(err)
}
// TODO(jba): check md more thoroughly.
if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
t.Errorf("metadata.ID: got %q, want %q", got, want)
if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
t.Errorf("metadata.FullID: got %q, want %q", got, want)
}
if got, want := md.Type, RegularTable; got != want {
t.Errorf("metadata.Type: got %v, want %v", got, want)
@ -160,7 +170,11 @@ func TestIntegration_TableMetadata(t *testing.T) {
}
for i, c := range partitionCases {
table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute)))
err = table.Create(context.Background(), &TableMetadata{
Schema: schema,
TimePartitioning: &c.timePartitioning,
ExpirationTime: time.Now().Add(5 * time.Minute),
})
if err != nil {
t.Fatal(err)
}
@ -172,12 +186,39 @@ func TestIntegration_TableMetadata(t *testing.T) {
got := md.TimePartitioning
want := &TimePartitioning{c.expectedExpiration}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
}
}
}
func TestIntegration_DatasetCreate(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
uid := strings.Replace(datasetIDs.New(), "-", "_", -1)
ds := client.Dataset(uid)
wmd := &DatasetMetadata{Name: "name", Location: "EU"}
err := ds.Create(ctx, wmd)
if err != nil {
t.Fatal(err)
}
gmd, err := ds.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
if got, want := gmd.Name, wmd.Name; got != want {
t.Errorf("name: got %q, want %q", got, want)
}
if got, want := gmd.Location, wmd.Location; got != want {
t.Errorf("location: got %q, want %q", got, want)
}
if err := ds.Delete(ctx); err != nil {
t.Fatalf("deleting dataset %s: %v", ds, err)
}
}
func TestIntegration_DatasetMetadata(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -187,8 +228,8 @@ func TestIntegration_DatasetMetadata(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
t.Errorf("ID: got %q, want %q", got, want)
if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
t.Errorf("FullID: got %q, want %q", got, want)
}
jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
if md.CreationTime.Before(jan2016) {
@ -211,7 +252,7 @@ func TestIntegration_DatasetDelete(t *testing.T) {
}
ctx := context.Background()
ds := client.Dataset("delete_test")
if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
if err := ds.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
t.Fatalf("creating dataset %s: %v", ds, err)
}
if err := ds.Delete(ctx); err != nil {
@ -219,6 +260,117 @@ func TestIntegration_DatasetDelete(t *testing.T) {
}
}
func TestIntegration_DatasetUpdateETags(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
check := func(md *DatasetMetadata, wantDesc, wantName string) {
if md.Description != wantDesc {
t.Errorf("description: got %q, want %q", md.Description, wantDesc)
}
if md.Name != wantName {
t.Errorf("name: got %q, want %q", md.Name, wantName)
}
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
if md.ETag == "" {
t.Fatal("empty ETag")
}
// Write without ETag succeeds.
desc := md.Description + "d2"
name := md.Name + "n2"
md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "")
if err != nil {
t.Fatal(err)
}
check(md2, desc, name)
// Write with original ETag fails because of intervening write.
_, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag)
if err == nil {
t.Fatal("got nil, want error")
}
// Write with most recent ETag succeeds.
md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag)
if err != nil {
t.Fatal(err)
}
check(md3, "", "")
}
func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// Set the default expiration time.
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Omitting DefaultTableExpiration doesn't change it.
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Setting it to 0 deletes it (which looks like a 0 duration).
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != 0 {
t.Fatalf("got %s, want 0", md.DefaultTableExpiration)
}
}
func TestIntegration_DatasetUpdateLabels(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// TODO(jba): use a separate dataset for each test run so
// tests don't interfere with each other.
var dm DatasetMetadataToUpdate
dm.SetLabel("label", "value")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if got, want := md.Labels["label"], "value"; got != want {
t.Errorf("got %q, want %q", got, want)
}
dm = DatasetMetadataToUpdate{}
dm.DeleteLabel("label")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if _, ok := md.Labels["label"]; ok {
t.Error("label still present after deletion")
}
}
func TestIntegration_Tables(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -275,7 +427,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
saverRows []*ValuesSaver
)
for i, name := range []string{"a", "b", "c"} {
row := []Value{name, int64(i)}
row := []Value{name, []Value{int64(i)}, []Value{true}}
wantRows = append(wantRows, row)
saverRows = append(saverRows, &ValuesSaver{
Schema: schema,
@ -297,7 +449,8 @@ func TestIntegration_UploadAndRead(t *testing.T) {
checkRead(t, "upload", table.Read(ctx), wantRows)
// Query the table.
q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
q.UseStandardSQL = true
q.DefaultProjectID = dataset.ProjectID
q.DefaultDatasetID = dataset.DatasetID
@ -347,7 +500,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
t.Fatal(err)
}
want := []Value(vl)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%d: got %v, want %v", i, got, want)
}
}
@ -362,9 +515,11 @@ func TestIntegration_UploadAndRead(t *testing.T) {
if got, want := len(vm), len(vl); got != want {
t.Fatalf("valueMap len: got %d, want %d", got, want)
}
// With maps, structs become nested maps.
vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]}
for i, v := range vl {
if got, want := vm[schema[i].Name], v; got != want {
t.Errorf("%d, name=%s: got %v, want %v",
if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) {
t.Errorf("%d, name=%s: got %#v, want %#v",
i, schema[i].Name, got, want)
}
}
@ -509,7 +664,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
for i, g := range got {
if i >= len(want) {
t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
} else if w := want[i]; !reflect.DeepEqual(g, w) {
} else if w := want[i]; !testutil.Equal(g, w) {
t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
}
}
@ -521,7 +676,7 @@ func (b byName) Len() int { return len(b) }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func TestIntegration_Update(t *testing.T) {
func TestIntegration_TableUpdate(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
@ -536,10 +691,12 @@ func TestIntegration_Update(t *testing.T) {
}
wantDescription := tm.Description + "more"
wantName := tm.Name + "more"
wantExpiration := tm.ExpirationTime.Add(time.Hour * 24)
got, err := table.Update(ctx, TableMetadataToUpdate{
Description: wantDescription,
Name: wantName,
})
Description: wantDescription,
Name: wantName,
ExpirationTime: wantExpiration,
}, tm.ETag)
if err != nil {
t.Fatal(err)
}
@ -549,10 +706,24 @@ func TestIntegration_Update(t *testing.T) {
if got.Name != wantName {
t.Errorf("Name: got %q, want %q", got.Name, wantName)
}
if !reflect.DeepEqual(got.Schema, schema) {
if got.ExpirationTime != wantExpiration {
t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration)
}
if !testutil.Equal(got.Schema, schema) {
t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
}
// Blind write succeeds.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "")
if err != nil {
t.Fatal(err)
}
// Write with old etag fails.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag)
if err == nil {
t.Fatal("Update with old ETag succeeded, wanted failure")
}
// Test schema update.
// Columns can be added. schema2 is the same as schema, except for the
// added column in the middle.
@ -562,63 +733,57 @@ func TestIntegration_Update(t *testing.T) {
}
schema2 := Schema{
schema[0],
{Name: "rec", Type: RecordFieldType, Schema: nested},
{Name: "rec2", Type: RecordFieldType, Schema: nested},
schema[1],
schema[2],
}
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2})
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "")
if err != nil {
t.Fatal(err)
}
// Wherever you add the column, it appears at the end.
schema3 := Schema{schema2[0], schema2[2], schema2[1]}
if !reflect.DeepEqual(got.Schema, schema3) {
schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]}
if !testutil.Equal(got.Schema, schema3) {
t.Errorf("add field:\ngot %v\nwant %v",
pretty.Value(got.Schema), pretty.Value(schema3))
}
// Updating with the empty schema succeeds, but is a no-op.
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}})
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "")
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got.Schema, schema3) {
if !testutil.Equal(got.Schema, schema3) {
t.Errorf("empty schema:\ngot %v\nwant %v",
pretty.Value(got.Schema), pretty.Value(schema3))
}
// Error cases.
// Error cases when updating schema.
for _, test := range []struct {
desc string
fields []*FieldSchema
}{
{"change from optional to required", []*FieldSchema{
schema3[0],
{Name: "num", Type: IntegerFieldType, Required: true},
{Name: "name", Type: StringFieldType, Required: true},
schema3[1],
schema3[2],
schema3[3],
}},
{"add a required field", []*FieldSchema{
schema3[0], schema3[1], schema3[2],
schema3[0], schema3[1], schema3[2], schema3[3],
{Name: "req", Type: StringFieldType, Required: true},
}},
{"remove a field", []*FieldSchema{schema3[0], schema3[1]}},
{"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}},
{"remove a nested field", []*FieldSchema{
schema3[0], schema3[1],
{Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
{"remove all nested fields", []*FieldSchema{
schema3[0], schema3[1],
{Name: "rec", Type: RecordFieldType, Schema: Schema{}}}},
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
} {
for {
_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)})
if !hasStatusCode(err, 403) {
break
}
// We've hit the rate limit for updates. Wait a bit and retry.
t.Logf("%s: retrying after getting %v", test.desc, err)
time.Sleep(4 * time.Second)
}
_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "")
if err == nil {
t.Errorf("%s: want error, got nil", test.desc)
} else if !hasStatusCode(err, 400) {
@ -632,7 +797,11 @@ func TestIntegration_Load(t *testing.T) {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
// CSV data can't be loaded into a repeated field, so we use a different schema.
table := newTable(t, Schema{
{Name: "name", Type: StringFieldType},
{Name: "nums", Type: IntegerFieldType},
})
defer table.Delete(ctx)
// Load the table from a reader.
@ -667,20 +836,24 @@ func TestIntegration_DML(t *testing.T) {
// Use DML to insert.
wantRows := [][]Value{
[]Value{"a", int64(0)},
[]Value{"b", int64(1)},
[]Value{"c", int64(2)},
[]Value{"a", []Value{int64(0)}, []Value{true}},
[]Value{"b", []Value{int64(1)}, []Value{false}},
[]Value{"c", []Value{int64(2)}, []Value{true}},
}
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
"VALUES ('a', 0), ('b', 1), ('c', 2)",
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
"VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
table.TableID)
q := client.Query(query)
q.UseStandardSQL = true // necessary for DML
job, err := q.Run(ctx)
if err != nil {
if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
return true, err // fail on 4xx
}
return false, err
}
if err := wait(ctx, job); err != nil {
fmt.Printf("wait: %v\n", err)
return false, err
}
if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
@ -819,6 +992,7 @@ func TestIntegration_LegacyQuery(t *testing.T) {
}
for _, c := range testCases {
q := client.Query(c.query)
q.UseLegacySQL = true
it, err := q.Read(ctx)
if err != nil {
t.Fatal(err)
@ -891,7 +1065,7 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
upl := table.Uploader()
row := &ValuesSaver{
Schema: schema,
Row: []Value{"name", nil},
Row: []Value{nil, []Value{}, []Value{nil}},
}
if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
t.Fatal(putError(err))
@ -900,25 +1074,128 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
t.Fatal(err)
}
q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
q := client.Query(fmt.Sprintf("select name from %s", table.TableID))
q.DefaultProjectID = dataset.ProjectID
q.DefaultDatasetID = dataset.DatasetID
it, err := q.Read(ctx)
if err != nil {
t.Fatal(err)
}
type S struct{ Num int64 }
type S struct{ Name string }
var s S
if err := it.Next(&s); err == nil {
t.Fatal("got nil, want error")
}
}
const (
stdName = "`bigquery-public-data.samples.shakespeare`"
legacyName = "[bigquery-public-data:samples.shakespeare]"
)
// These tests exploit the fact that the two SQL versions have different syntaxes for
// fully-qualified table names.
var useLegacySqlTests = []struct {
t string // name of table
std, legacy bool // use standard/legacy SQL
err bool // do we expect an error?
}{
{t: legacyName, std: false, legacy: true, err: false},
{t: legacyName, std: true, legacy: false, err: true},
{t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default
{t: legacyName, std: true, legacy: true, err: true},
{t: stdName, std: false, legacy: true, err: true},
{t: stdName, std: true, legacy: false, err: false},
{t: stdName, std: false, legacy: false, err: false}, // standard SQL is default
{t: stdName, std: true, legacy: true, err: true},
}
func TestIntegration_QueryUseLegacySQL(t *testing.T) {
// Test the UseLegacySQL and UseStandardSQL options for queries.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
for _, test := range useLegacySqlTests {
q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
q.UseStandardSQL = test.std
q.UseLegacySQL = test.legacy
_, err := q.Read(ctx)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
}
}
func TestIntegration_TableUseLegacySQL(t *testing.T) {
// Test UseLegacySQL and UseStandardSQL for Table.Create.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
defer table.Delete(ctx)
for i, test := range useLegacySqlTests {
view := dataset.Table(fmt.Sprintf("t_view_%d", i))
tm := &TableMetadata{
ViewQuery: fmt.Sprintf("SELECT word from %s", test.t),
UseStandardSQL: test.std,
UseLegacySQL: test.legacy,
}
err := view.Create(ctx, tm)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
view.Delete(ctx)
}
}
func TestIntegration_ListJobs(t *testing.T) {
// It's difficult to test the list of jobs, because we can't easily
// control what's in it. Also, there are many jobs in the test project,
// and it takes considerable time to list them all.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
// About all we can do is list a few jobs.
const max = 20
var jis []JobInfo
it := client.Jobs(ctx)
for {
ji, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
t.Fatal(err)
}
jis = append(jis, ji)
if len(jis) >= max {
break
}
}
// We expect that there is at least one job in the last few months.
if len(jis) == 0 {
t.Fatal("did not get any jobs")
}
}
// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
name := fmt.Sprintf("t%d", time.Now().UnixNano())
table := dataset.Table(name)
err := table.Create(context.Background(), s, TableExpiration(testTableExpiration))
err := table.Create(context.Background(), &TableMetadata{
Schema: s,
ExpirationTime: testTableExpiration,
})
if err != nil {
t.Fatal(err)
}
@ -943,8 +1220,8 @@ func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
for i, r := range got {
gotRow := []Value(r)
wantRow := want[i]
if !reflect.DeepEqual(gotRow, wantRow) {
return fmt.Sprintf("#%d: got %v, want %v", i, gotRow, wantRow), false
if !testutil.Equal(gotRow, wantRow) {
return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false
}
}
return "", true


@ -70,7 +70,7 @@ type RowIterator struct {
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value.
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
@ -89,8 +89,8 @@ type RowIterator struct {
// TIME civil.Time
// DATETIME civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type.
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field.
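// A minimal usage sketch of Next with a struct destination (the iterator "it"
// and the row type here are illustrative placeholders):
//
//	type row struct {
//		Word  string // STRING column
//		Count int64  // INTEGER column
//	}
//	for {
//		var r row
//		err := it.Next(&r)
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// handle err
//		}
//		// use r.Word and r.Count
//	}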

View File

@ -17,9 +17,10 @@ package bigquery
import (
"errors"
"fmt"
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
@ -246,10 +247,10 @@ func TestIterator(t *testing.T) {
if err != tc.wantErr {
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
}
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
}
@ -339,7 +340,7 @@ func TestNextAfterFinished(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
}
// Try calling Get again.

View File

@ -16,12 +16,17 @@ package bigquery
import (
"errors"
"fmt"
"math/rand"
"os"
"sync"
"time"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)
// A Job represents an operation which has been submitted to BigQuery for processing.
@ -46,6 +51,7 @@ func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
return job, nil
}
// ID returns the job's ID.
func (j *Job) ID() string {
return j.jobID
}
@ -54,7 +60,8 @@ func (j *Job) ID() string {
type State int
const (
Pending State = iota
StateUnspecified State = iota // used only as a default in JobIterator
Pending
Running
Done
)
@ -73,21 +80,43 @@ type JobStatus struct {
Statistics *JobStatistics
}
// setJobRef initializes job's JobReference if given a non-empty jobID.
// createJobRef creates a JobReference.
// projectID must be non-empty.
func setJobRef(job *bq.Job, jobID, projectID string) {
func createJobRef(jobID string, addJobIDSuffix bool, projectID string) *bq.JobReference {
if jobID == "" {
return
jobID = randomJobIDFn()
} else if addJobIDSuffix {
jobID += "-" + randomJobIDFn()
}
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
job.JobReference = &bq.JobReference{
return &bq.JobReference{
JobId: jobID,
ProjectId: projectID,
}
}
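// A quick sketch of the resulting JobReference IDs, with "<rand>" standing in
// for the generated suffix:
//
//	createJobRef("", false, "proj")     // JobId: "<rand>"
//	createJobRef("", true, "proj")      // JobId: "<rand>" (the suffix flag is irrelevant when jobID is empty)
//	createJobRef("job1", false, "proj") // JobId: "job1"
//	createJobRef("job1", true, "proj")  // JobId: "job1-<rand>"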
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var (
rngMu sync.Mutex
rng = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)
// For testing.
var randomJobIDFn = randomJobID
func randomJobID() string {
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for suffixes.
var b [27]byte
rngMu.Lock()
for i := 0; i < len(b); i++ {
b[i] = alphanum[rng.Intn(len(alphanum))]
}
rngMu.Unlock()
return string(b[:])
}
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
@ -99,20 +128,25 @@ func (s *JobStatus) Err() error {
return s.err
}
// Fill in the client field of Tables in the statistics.
func (s *JobStatus) setClient(c *Client) {
if s.Statistics == nil {
return
}
if qs, ok := s.Statistics.Details.(*QueryStatistics); ok {
for _, t := range qs.ReferencedTables {
t.c = c
}
}
}
// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
if err != nil {
return nil, err
}
// Fill in the client field of Tables in the statistics.
if js.Statistics != nil {
if qs, ok := js.Statistics.Details.(*QueryStatistics); ok {
for _, t := range qs.ReferencedTables {
t.c = j.c
}
}
}
js.setClient(j.c)
return js, nil
}
@ -324,3 +358,73 @@ type ExplainQueryStep struct {
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}
// Jobs lists jobs within a project.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
it := &JobIterator{
ctx: ctx,
c: c,
ProjectID: c.projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// A JobInfo consists of a Job and a JobStatus.
type JobInfo struct {
Job *Job
Status *JobStatus
}
// JobIterator iterates over jobs in a project.
type JobIterator struct {
ProjectID string // Project ID of the jobs to list. Default is the client's project.
AllUsers bool // Whether to list jobs owned by all users in the project, or just the current caller.
State State // List only jobs in the given state. Defaults to all states.
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []JobInfo
}
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *JobIterator) Next() (JobInfo, error) {
if err := it.nextFunc(); err != nil {
return JobInfo{}, err
}
item := it.items[0]
it.items = it.items[1:]
return item, nil
}
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
var st string
switch it.State {
case StateUnspecified:
st = ""
case Pending:
st = "pending"
case Running:
st = "running"
case Done:
st = "done"
default:
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
}
jobInfos, nextPageToken, err := it.c.service.listJobs(it.ctx, it.ProjectID, pageSize, pageToken, it.AllUsers, st)
if err != nil {
return "", err
}
for _, ji := range jobInfos {
ji.Job.c = it.c
ji.Status.setClient(it.c)
it.items = append(it.items, ji)
}
return nextPageToken, nil
}
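// A usage sketch of the iterator (assuming ctx and a *Client named client):
// list only the running jobs in the client's project.
//
//	it := client.Jobs(ctx)
//	it.State = Running
//	for {
//		ji, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// handle err
//		}
//		fmt.Println(ji.Job.ID(), ji.Status.State)
//	}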

95
vendor/cloud.google.com/go/bigquery/job_test.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func TestCreateJobRef(t *testing.T) {
defer fixRandomJobID("RANDOM")()
for _, test := range []struct {
jobID string
addJobIDSuffix bool
want string
}{
{
jobID: "foo",
addJobIDSuffix: false,
want: "foo",
},
{
jobID: "",
addJobIDSuffix: false,
want: "RANDOM",
},
{
jobID: "",
addJobIDSuffix: true, // irrelevant
want: "RANDOM",
},
{
jobID: "foo",
addJobIDSuffix: true,
want: "foo-RANDOM",
},
} {
jr := createJobRef(test.jobID, test.addJobIDSuffix, "projectID")
got := jr.JobId
if got != test.want {
t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)
}
}
}
func fixRandomJobID(s string) func() {
prev := randomJobIDFn
randomJobIDFn = func() string { return s }
return func() { randomJobIDFn = prev }
}
func checkJob(t *testing.T, i int, got, want *bq.Job) {
if got.JobReference == nil {
t.Errorf("#%d: empty job reference", i)
return
}
if got.JobReference.JobId == "" {
t.Errorf("#%d: empty job ID", i)
return
}
d := testutil.Diff(got, want)
if d != "" {
t.Errorf("#%d: (got=-, want=+) %s", i, d)
}
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
s.Job = conf.job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

View File

@ -21,9 +21,12 @@ import (
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// JobID is the ID to use for the load job. If unset, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the source from which data will be loaded.
Src LoadSource
@ -56,6 +59,8 @@ type LoadSource interface {
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
return &Loader{
c: t.c,
@ -69,6 +74,7 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {
// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{
JobReference: createJobRef(l.JobID, l.AddJobIDSuffix, l.c.projectID),
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
@ -78,9 +84,6 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) {
}
conf := &insertJobConf{job: job}
l.Src.populateInsertJobConfForLoad(conf)
setJobRef(job, l.JobID, l.c.projectID)
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
return l.c.insertJob(ctx, conf)
}
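// A usage sketch (assuming ctx, a *Table named table and a *GCSReference named gcsRef):
//
//	loader := table.LoaderFrom(gcsRef)
//	loader.JobID = "my-load"
//	loader.AddJobIDSuffix = true // the job is submitted as "my-load-<random suffix>"
//	job, err := loader.Run(ctx)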

View File

@ -15,22 +15,21 @@
package bigquery
import (
"reflect"
"strings"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/pretty"
bq "google.golang.org/api/bigquery/v2"
)
func defaultLoadJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -68,7 +67,8 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}
func TestLoad(t *testing.T) {
c := &Client{projectID: "project-id"}
defer fixRandomJobID("RANDOM")()
c := &Client{projectID: "client-project-id"}
testCases := []struct {
dst *Table
@ -95,7 +95,7 @@ func TestLoad(t *testing.T) {
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "project-id",
ProjectId: "client-project-id",
}
return j
}(),
@ -218,12 +218,9 @@ func TestLoad(t *testing.T) {
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
if _, err := loader.Run(context.Background()); err != nil {
t.Errorf("%d: err calling Loader.Run: %v", i, err)
t.Errorf("#%d: err calling Loader.Run: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("loading %d: got:\n%v\nwant:\n%v",
i, pretty.Value(s.Job), pretty.Value(tc.want))
}
checkJob(t, i, s.Job, tc.want)
}
}

View File

@ -21,7 +21,10 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -74,7 +77,7 @@ func TestParamValueScalar(t *testing.T) {
continue
}
want := sval(test.want)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}
}
@ -99,7 +102,7 @@ func TestParamValueArray(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
}
}
@ -121,7 +124,7 @@ func TestParamValueStruct(t *testing.T) {
"C": sval("true"),
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v\nwant %+v", got, want)
}
}
@ -172,7 +175,7 @@ func TestParamType(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
}
}
@ -196,7 +199,9 @@ func TestIntegration_ScalarParam(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !equal(got, test.val) {
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool {
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
})) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
}
}
@ -219,7 +224,7 @@ func TestIntegration_OtherParam(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !equal(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
}
}
@ -242,21 +247,3 @@ func paramRoundTrip(c *Client, x interface{}) (Value, error) {
}
return val[0], nil
}
func equal(x1, x2 interface{}) bool {
if reflect.TypeOf(x1) != reflect.TypeOf(x2) {
return false
}
switch x1 := x1.(type) {
case float64:
if math.IsNaN(x1) {
return math.IsNaN(x2.(float64))
}
return x1 == x2
case time.Time:
// BigQuery is only accurate to the microsecond.
return x1.Round(time.Microsecond).Equal(x2.(time.Time).Round(time.Microsecond))
default:
return reflect.DeepEqual(x1, x2)
}
}

View File

@ -15,16 +15,20 @@
package bigquery
import (
"errors"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
// JobID is the ID to use for the query job. If this field is empty, a job ID
// will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created.
Dst *Table
@ -85,10 +89,12 @@ type QueryConfig struct {
// used.
MaxBytesBilled int64
// UseStandardSQL causes the query to use standard SQL.
// The default is false (using legacy SQL).
// UseStandardSQL causes the query to use standard SQL. The default.
UseStandardSQL bool
// UseLegacySQL causes the query to use legacy SQL.
UseLegacySQL bool
// Parameters is a list of query parameters. The presence of parameters
// implies the use of standard SQL.
// If the query uses positional syntax ("?"), then no parameter may have a name.
@ -123,12 +129,11 @@ func (c *Client) Query(q string) *Query {
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{
JobReference: createJobRef(q.JobID, q.AddJobIDSuffix, q.client.projectID),
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{},
},
}
setJobRef(job, q.JobID, q.client.projectID)
if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
return nil, err
}
@ -177,11 +182,18 @@ func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) err
if q.MaxBytesBilled >= 1 {
conf.MaximumBytesBilled = q.MaxBytesBilled
}
if q.UseStandardSQL || len(q.Parameters) > 0 {
if q.UseStandardSQL && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(q.Parameters) > 0 && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
if q.UseLegacySQL {
conf.UseLegacySql = true
} else {
conf.UseLegacySql = false
conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
}
if q.Dst != nil && !q.Dst.implicitTable() {
conf.DestinationTable = q.Dst.tableRefProto()
}
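// A usage sketch (assuming ctx and a *Client named client; the table and
// parameter names are placeholders):
//
//	q := client.Query("select word from mydataset.mytable where corpus = @corpus limit 10")
//	q.Parameters = []QueryParameter{{Name: "corpus", Value: "hamlet"}}
//	// Parameters imply standard SQL, so also setting UseLegacySQL would make Run return an error.
//	job, err := q.Run(ctx)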

View File

@ -15,9 +15,10 @@
package bigquery
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -25,10 +26,11 @@ import (
func defaultQueryJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -37,14 +39,17 @@ func defaultQueryJob() *bq.Job {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
}
}
func TestQuery(t *testing.T) {
defer fixRandomJobID("RANDOM")()
c := &Client{
projectID: "project-id",
projectID: "client-project-id",
}
testCases := []struct {
dst *Table
@ -67,6 +72,20 @@ func TestQuery(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
JobID: "jobID",
AddJobIDSuffix: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
j.JobReference.JobId = "jobID-RANDOM"
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
@ -143,6 +162,7 @@ func TestQuery(t *testing.T) {
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
@ -242,27 +262,35 @@ func TestQuery(t *testing.T) {
DefaultDatasetID: "def-dataset-id",
UseStandardSQL: true,
},
want: defaultQueryJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseLegacySQL: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.UseLegacySql = false
j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"}
j.Configuration.Query.UseLegacySql = true
j.Configuration.Query.ForceSendFields = nil
return j
}(),
},
}
for _, tc := range testCases {
for i, tc := range testCases {
s := &testService{}
c.service = s
query := c.Query("")
query.QueryConfig = *tc.src
query.Dst = tc.dst
if _, err := query.Run(context.Background()); err != nil {
t.Errorf("err calling query: %v", err)
t.Errorf("#%d: err calling query: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}
@ -288,6 +316,8 @@ func TestConfiguringQuery(t *testing.T) {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
JobReference: &bq.JobReference{
@ -299,7 +329,28 @@ func TestConfiguringQuery(t *testing.T) {
if _, err := query.Run(context.Background()); err != nil {
t.Fatalf("err calling Query.Run: %v", err)
}
if !reflect.DeepEqual(s.Job, want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
if diff := testutil.Diff(s.Job, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff)
}
}
func TestQueryLegacySQL(t *testing.T) {
c := &Client{
projectID: "project-id",
service: &testService{},
}
q := c.Query("q")
q.UseStandardSQL = true
q.UseLegacySQL = true
_, err := q.Run(context.Background())
if err == nil {
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
}
q = c.Query("q")
q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
q.UseLegacySQL = true
_, err = q.Run(context.Background())
if err == nil {
t.Error("Parameters and UseLegacySQL: got nil, want error")
}
}

View File

@ -16,9 +16,12 @@ package bigquery
import (
"errors"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
@ -113,7 +116,7 @@ func TestRead(t *testing.T) {
service.values = tc.data
service.pageTokens = tc.pageTokens
if got, ok := collectValues(t, readFunc()); ok {
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
@ -210,7 +213,7 @@ func TestReadTabledataOptions(t *testing.T) {
tok: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
}
}
@ -254,7 +257,7 @@ func TestReadQueryOptions(t *testing.T) {
tok: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
}
}

View File

@ -77,11 +77,6 @@ func (s Schema) asTableSchema() *bq.TableSchema {
return &bq.TableSchema{Fields: fields}
}
// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
func (s Schema) customizeCreateTable(conf *createTableConf) {
conf.schema = s.asTableSchema()
}
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{
Description: tfs.Description,

View File

@ -22,6 +22,7 @@ import (
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -192,12 +193,12 @@ func TestSchemaConversion(t *testing.T) {
for _, tc := range testCases {
bqSchema := tc.schema.asTableSchema()
if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
if !testutil.Equal(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
}
schema := convertTableSchema(tc.bqSchema)
if !reflect.DeepEqual(schema, tc.schema) {
if !testutil.Equal(schema, tc.schema) {
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
}
}
@ -311,7 +312,7 @@ func TestSimpleInference(t *testing.T) {
if err != nil {
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
@ -414,7 +415,7 @@ func TestNestedInference(t *testing.T) {
if err != nil {
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
@ -483,7 +484,7 @@ func TestRepeatedInference(t *testing.T) {
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
@ -512,7 +513,7 @@ func TestEmbeddedInference(t *testing.T) {
reqField("Embedded", "INTEGER"),
reqField("Embedded2", "INTEGER"),
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
}
}
@ -617,7 +618,7 @@ func TestTagInference(t *testing.T) {
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
@ -675,7 +676,7 @@ func TestTagInferenceErrors(t *testing.T) {
for i, tc := range testCases {
want := tc.err
_, got := InferSchema(tc.in)
if !reflect.DeepEqual(got, want) {
if got != want {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
}
}
@ -746,7 +747,7 @@ func TestSchemaErrors(t *testing.T) {
for _, tc := range testCases {
want := tc.err
_, got := InferSchema(tc.in)
if !reflect.DeepEqual(got, want) {
if got != want {
t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
}
}

View File

@ -15,13 +15,14 @@
package bigquery
import (
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
@ -40,24 +41,26 @@ type service interface {
getJob(ctx context.Context, projectId, jobID string) (*Job, error)
jobCancel(ctx context.Context, projectId, jobID string) error
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
listJobs(ctx context.Context, projectId string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error)
// Tables
createTable(ctx context.Context, conf *createTableConf) error
createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)
// Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error
// Datasets
insertDataset(ctx context.Context, datasetID, projectID string) error
insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error
deleteDataset(ctx context.Context, datasetID, projectID string) error
getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)
// Misc
@ -177,7 +180,6 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
// Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
setClientHeader(req.Header())
if pageToken != "" {
req.PageToken(pageToken)
} else {
@ -189,33 +191,37 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
}
// Fetch the table schema in the background, if necessary.
var schemaErr error
var schemaFetch sync.WaitGroup
if conf.schema == nil {
schemaFetch.Add(1)
errc := make(chan error, 1)
if conf.schema != nil {
errc <- nil
} else {
go func() {
defer schemaFetch.Done()
var t *bq.Table
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
if schemaErr == nil && t.Schema != nil {
err := runWithRetry(ctx, func() (err error) {
t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && t.Schema != nil {
conf.schema = convertTableSchema(t.Schema)
}
errc <- err
}()
}
res, err := req.Context(ctx).Do()
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
schemaFetch.Wait()
if schemaErr != nil {
return nil, schemaErr
err = <-errc
if err != nil {
return nil, err
}
result := &readDataResult{
pageToken: res.PageToken,
totalRows: uint64(res.TotalRows),
@ -276,12 +282,11 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
Json: m,
})
}
call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err := runWithRetry(ctx, func() error {
var err error
req := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
setClientHeader(req.Header())
res, err = req.Do()
err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
@ -309,25 +314,41 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
}
func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("configuration").
Context(ctx).
Do()
bqjob, err := s.getJobInternal(ctx, projectID, jobID, "configuration", "jobReference")
if err != nil {
return nil, err
}
var isQuery bool
var dest *bq.TableReference
if res.Configuration.Query != nil {
isQuery = true
dest = res.Configuration.Query.DestinationTable
return jobFromProtos(bqjob.JobReference, bqjob.Configuration), nil
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
job, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
if err != nil {
return nil, err
}
return &Job{
projectID: projectID,
jobID: jobID,
isQuery: isQuery,
destinationTable: dest,
}, nil
st, err := jobStatusFromProto(job.Status)
if err != nil {
return nil, err
}
st.Statistics = jobStatisticsFromProto(job.Statistics)
return st, nil
}
func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := s.s.Jobs.Get(projectID, jobID).Context(ctx)
if len(fields) > 0 {
call = call.Fields(fields...)
}
setClientHeader(call.Header())
err := runWithRetry(ctx, func() (err error) {
job, err = call.Do()
return err
})
if err != nil {
return nil, err
}
return job, nil
}
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
@ -336,27 +357,29 @@ func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
_, err := s.s.Jobs.Cancel(projectID, jobID).
call := s.s.Jobs.Cancel(projectID, jobID).
Fields(). // We don't need any of the response data.
Context(ctx).
Do()
return err
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("status", "statistics"). // Only fetch what we need.
Context(ctx).
Do()
if err != nil {
return nil, err
func jobFromProtos(jr *bq.JobReference, config *bq.JobConfiguration) *Job {
var isQuery bool
var dest *bq.TableReference
if config.Query != nil {
isQuery = true
dest = config.Query.DestinationTable
}
st, err := jobStatusFromProto(res.Status)
if err != nil {
return nil, err
return &Job{
projectID: jr.ProjectId,
jobID: jr.JobId,
isQuery: isQuery,
destinationTable: dest,
}
st.Statistics = jobStatisticsFromProto(res.Statistics)
return st, nil
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
@ -465,7 +488,11 @@ func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID s
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
var res *bq.TableList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Do()
return err
})
if err != nil {
return nil, "", err
}
@ -475,61 +502,98 @@ func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID s
return tables, res.NextPageToken, nil
}
type createTableConf struct {
projectID, datasetID, tableID string
expiration time.Time
viewQuery string
schema *bq.TableSchema
useStandardSQL bool
timePartitioning *TimePartitioning
}
// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
table := &bq.Table{
TableReference: &bq.TableReference{
ProjectId: conf.projectID,
DatasetId: conf.datasetID,
TableId: conf.tableID,
},
func (s *bigqueryService) createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error {
table, err := bqTableFromMetadata(tm)
if err != nil {
return err
}
if !conf.expiration.IsZero() {
table.ExpirationTime = conf.expiration.UnixNano() / 1e6
table.TableReference = &bq.TableReference{
ProjectId: projectID,
DatasetId: datasetID,
TableId: tableID,
}
// TODO(jba): make it impossible to provide both a view query and a schema.
if conf.viewQuery != "" {
table.View = &bq.ViewDefinition{
Query: conf.viewQuery,
req := s.s.Tables.Insert(projectID, datasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
func bqTableFromMetadata(tm *TableMetadata) (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
if tm.Schema != nil {
t.Schema = tm.Schema.asTableSchema()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if conf.useStandardSQL {
table.View.UseLegacySql = false
table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
if conf.schema != nil {
table.Schema = conf.schema
}
if conf.timePartitioning != nil {
table.TimePartitioning = &bq.TimePartitioning{
if tm.TimePartitioning != nil {
t.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
ExpirationMs: int64(tm.TimePartitioning.Expiration / time.Millisecond),
}
}
if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
req := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err := req.Do()
return err
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
}
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
setClientHeader(req.Header())
table, err := req.Do()
var table *bq.Table
err := runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
@ -539,7 +603,7 @@ func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datas
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
return runWithRetry(ctx, func() error { return req.Do() })
}
func bqTableToMetadata(t *bq.Table) *TableMetadata {
@ -547,18 +611,20 @@ func bqTableToMetadata(t *bq.Table) *TableMetadata {
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
ID: t.Id,
FullID: t.Id,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
}
if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema)
}
if t.View != nil {
md.View = t.View.Query
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
if t.TimePartitioning != nil {
md.TimePartitioning = &TimePartitioning{
@ -583,9 +649,10 @@ func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
ID: d.Id,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
}
@ -610,12 +677,13 @@ func convertTableReference(tr *bq.TableReference) *Table {
// patchTableConf contains fields to be patched.
type patchTableConf struct {
// These fields are omitted from the patch operation if nil.
Description *string
Name *string
Schema Schema
Description *string
Name *string
Schema Schema
ExpirationTime time.Time
}
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
@ -633,39 +701,135 @@ func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID,
t.Schema = conf.Schema.asTableSchema()
forceSend("Schema")
}
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
Context(ctx).
Do()
if err != nil {
if !conf.ExpirationTime.IsZero() {
t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var table *bq.Table
if err := runWithRetry(ctx, func() (err error) {
table, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
ds := &bq.Dataset{
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error {
// TODO(jba): retry?
ds, err := bqDatasetFromMetadata(dm)
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: datasetID}
req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
setClientHeader(req.Header())
_, err := req.Do()
_, err = req.Do()
return err
}
func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
ds := bqDatasetFromUpdateMetadata(dm)
call := s.s.Datasets.Patch(projectID, datasetID, ds).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var ds2 *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds2, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqDatasetToMetadata(ds2), nil
}
func bqDatasetFromMetadata(dm *DatasetMetadata) (*bq.Dataset, error) {
ds := &bq.Dataset{}
if dm == nil {
return ds, nil
}
ds.FriendlyName = dm.Name
ds.Description = dm.Description
ds.Location = dm.Location
ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
ds.Labels = dm.Labels
if !dm.CreationTime.IsZero() {
return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
}
if !dm.LastModifiedTime.IsZero() {
return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
}
if dm.FullID != "" {
return nil, errors.New("bigquery: Dataset.FullID is not writable")
}
if dm.ETag != "" {
return nil, errors.New("bigquery: Dataset.ETag is not writable")
}
return ds, nil
}
func bqDatasetFromUpdateMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
ds := &bq.Dataset{}
forceSend := func(field string) {
ds.ForceSendFields = append(ds.ForceSendFields, field)
}
if dm.Description != nil {
ds.Description = optional.ToString(dm.Description)
forceSend("Description")
}
if dm.Name != nil {
ds.FriendlyName = optional.ToString(dm.Name)
forceSend("FriendlyName")
}
if dm.DefaultTableExpiration != nil {
dur := optional.ToDuration(dm.DefaultTableExpiration)
if dur == 0 {
// Send a null to delete the field.
ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
} else {
ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
}
}
if dm.setLabels != nil || dm.deleteLabels != nil {
ds.Labels = map[string]string{}
for k, v := range dm.setLabels {
ds.Labels[k] = v
}
if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
forceSend("Labels")
}
for l := range dm.deleteLabels {
ds.NullFields = append(ds.NullFields, "Labels."+l)
}
}
return ds
}
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
return runWithRetry(ctx, func() error { return req.Do() })
}
func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
setClientHeader(req.Header())
table, err := req.Do()
if err != nil {
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = req.Do()
return err
}); err != nil {
return nil, err
}
return bqDatasetToMetadata(table), nil
return bqDatasetToMetadata(ds), nil
}
func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
@ -680,7 +844,11 @@ func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, ma
if filter != "" {
req.Filter(filter)
}
res, err := req.Do()
var res *bq.DatasetList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Do()
return err
})
if err != nil {
return nil, "", err
}
@ -698,13 +866,54 @@ func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Datas
}
}
func (s *bigqueryService) listJobs(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error) {
req := s.s.Jobs.List(projectID).
Context(ctx).
PageToken(pageToken).
Projection("full").
AllUsers(all)
if state != "" {
req.StateFilter(state)
}
setClientHeader(req.Header())
if maxResults > 0 {
req.MaxResults(int64(maxResults))
}
res, err := req.Do()
if err != nil {
return nil, "", err
}
var jobInfos []JobInfo
for _, j := range res.Jobs {
ji, err := s.convertListedJob(j)
if err != nil {
return nil, "", err
}
jobInfos = append(jobInfos, ji)
}
return jobInfos, res.NextPageToken, nil
}
func (s *bigqueryService) convertListedJob(j *bq.JobListJobs) (JobInfo, error) {
st, err := jobStatusFromProto(j.Status)
if err != nil {
return JobInfo{}, err
}
st.Statistics = jobStatisticsFromProto(j.Statistics)
return JobInfo{
Job: jobFromProtos(j.JobReference, j.Configuration),
Status: st,
}, nil
}
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
backoff := gax.Backoff{
Initial: 2 * time.Second,
Initial: 1 * time.Second,
Max: 32 * time.Second,
Multiplier: 2,
}
@ -717,7 +926,7 @@ func runWithRetry(ctx context.Context, call func() error) error {
})
}
// Use the criteria in https://cloud.google.com/bigquery/troubleshooting-errors.
// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
e, ok := err.(*googleapi.Error)
if !ok {
@ -727,5 +936,5 @@ func retryableError(err error) bool {
if len(e.Errors) > 0 {
reason = e.Errors[0].Reason
}
return reason == "backendError" && (e.Code == 500 || e.Code == 503)
return reason == "backendError" || reason == "rateLimitExceeded"
}

View File

@ -15,10 +15,11 @@
package bigquery
import (
"reflect"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -58,26 +59,193 @@ func TestBQTableToMetadata(t *testing.T) {
&TableMetadata{
Description: "desc",
Name: "fname",
View: "view-query",
ID: "id",
ViewQuery: "view-query",
FullID: "id",
Type: ExternalTable,
ExpirationTime: aTime.Truncate(time.Millisecond),
CreationTime: aTime.Truncate(time.Millisecond),
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumRows: 7,
TimePartitioning: &TimePartitioning{Expiration: time.Duration(7890) * time.Millisecond},
TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: aTime,
},
ETag: "etag",
},
},
} {
got := bqTableToMetadata(test.in)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
}
}
}
func TestBQTableFromMetadata(t *testing.T) {
aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
aTimeMillis := aTime.UnixNano() / 1e6
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
for _, test := range []struct {
in *TableMetadata
want *bq.Table
}{
{nil, &bq.Table{}},
{&TableMetadata{}, &bq.Table{}},
{
&TableMetadata{
Name: "n",
Description: "d",
Schema: sc,
ExpirationTime: aTime,
},
&bq.Table{
FriendlyName: "n",
Description: "d",
Schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
ExpirationTime: aTimeMillis,
},
},
{
&TableMetadata{ViewQuery: "q"},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseLegacySQL: true,
TimePartitioning: &TimePartitioning{},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: true,
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 0,
},
},
},
{
&TableMetadata{
ViewQuery: "q",
UseStandardSQL: true,
TimePartitioning: &TimePartitioning{time.Second},
},
&bq.Table{
View: &bq.ViewDefinition{
Query: "q",
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
TimePartitioning: &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: 1000,
},
},
},
} {
got, err := bqTableFromMetadata(test.in)
if err != nil {
t.Fatalf("%+v: %v", test.in, err)
}
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
}
}
// Errors
for _, in := range []*TableMetadata{
{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
{UseLegacySQL: true}, // UseLegacySQL without query
{UseStandardSQL: true}, // UseStandardSQL without query
// read-only fields
{FullID: "x"},
{Type: "x"},
{CreationTime: aTime},
{LastModifiedTime: aTime},
{NumBytes: 1},
{NumRows: 1},
{StreamingBuffer: &StreamingBuffer{}},
{ETag: "x"},
} {
_, err := bqTableFromMetadata(in)
if err == nil {
t.Errorf("%+v: got nil, want error", in)
}
}
}
func TestBQDatasetFromMetadata(t *testing.T) {
for _, test := range []struct {
in *DatasetMetadata
want *bq.Dataset
}{
{nil, &bq.Dataset{}},
{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
{&DatasetMetadata{
Name: "name",
Description: "desc",
DefaultTableExpiration: time.Hour,
Location: "EU",
Labels: map[string]string{"x": "y"},
}, &bq.Dataset{
FriendlyName: "name",
Description: "desc",
DefaultTableExpirationMs: 60 * 60 * 1000,
Location: "EU",
Labels: map[string]string{"x": "y"},
}},
} {
got, err := bqDatasetFromMetadata(test.in)
if err != nil {
t.Fatal(err)
}
if !testutil.Equal(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
}
}
// Check that non-writeable fields are unset.
_, err := bqDatasetFromMetadata(&DatasetMetadata{FullID: "x"})
if err == nil {
t.Error("got nil, want error")
}
}
func TestBQDatasetFromUpdateMetadata(t *testing.T) {
dm := DatasetMetadataToUpdate{
Description: "desc",
Name: "name",
DefaultTableExpiration: time.Hour,
}
dm.SetLabel("label", "value")
dm.DeleteLabel("del")
got := bqDatasetFromUpdateMetadata(&dm)
want := &bq.Dataset{
Description: "desc",
FriendlyName: "name",
DefaultTableExpirationMs: 60 * 60 * 1000,
Labels: map[string]string{"label": "value"},
ForceSendFields: []string{"Description", "FriendlyName"},
NullFields: []string{"Labels.del"},
}
if diff := testutil.Diff(got, want); diff != "" {
t.Errorf("-got, +want:\n%s", diff)
}
}

View File

@ -39,18 +39,39 @@ type Table struct {
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
Description string // The user-friendly description of this table.
Name string // The user-friendly name for this table.
Schema Schema
View string
// The following fields can be set when creating a table.
ID string // An opaque ID uniquely identifying the table.
Type TableType
// The user-friendly name for the table.
Name string
// The user-friendly description of the table.
Description string
// The table schema. If provided on create, ViewQuery must be empty.
Schema Schema
// The query to use for a view. If provided on create, Schema must be nil.
ViewQuery string
// Use Legacy SQL for the view query.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseLegacySQL bool
	// Use Standard SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseStandardSQL bool
// If non-nil, the table is partitioned by time.
TimePartitioning *TimePartitioning
// The time when this table expires. If not set, the table will persist
// indefinitely. Expired tables will be deleted and their storage reclaimed.
ExpirationTime time.Time
// All the fields below are read-only.
FullID string // An opaque ID uniquely identifying the table.
Type TableType
CreationTime time.Time
LastModifiedTime time.Time
@ -62,13 +83,14 @@ type TableMetadata struct {
// This does not include data that is being buffered during a streaming insert.
NumRows uint64
// The time-based partitioning settings for this table.
TimePartitioning *TimePartitioning
// Contains information regarding this table's streaming buffer, if one is
// present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer
// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}
// TableCreateDisposition specifies the circumstances under which destination table will be created.
@ -111,6 +133,14 @@ const (
ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
}
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
// A lower-bound estimate of the number of bytes currently in the streaming
@ -144,16 +174,9 @@ func (t *Table) implicitTable() bool {
}
// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
conf := &createTableConf{
projectID: t.ProjectID,
datasetID: t.DatasetID,
tableID: t.TableID,
}
for _, o := range options {
o.customizeCreateTable(conf)
}
return t.c.service.createTable(ctx, conf)
	// Pass in a TableMetadata value to configure the table.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
return t.c.service.createTable(ctx, t.ProjectID, t.DatasetID, t.TableID, tm)
}
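// A usage sketch of the metadata-based Create (assuming ctx, a *Client named
// client and a Schema named schema; the dataset and table names are placeholders):
//
//	t := client.Dataset("my_dataset").Table("new_table")
//	err := t.Create(ctx, &TableMetadata{
//		Schema:           schema,
//		ExpirationTime:   time.Now().Add(24 * time.Hour),
//		TimePartitioning: &TimePartitioning{Expiration: 7 * 24 * time.Hour},
//	})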
// Metadata fetches the metadata for the table.
@ -166,53 +189,6 @@ func (t *Table) Delete(ctx context.Context) error {
return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}
// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
customizeCreateTable(*createTableConf)
}
type tableExpiration time.Time
// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }
func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
conf.expiration = time.Time(opt)
}
type viewQuery string
// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }
func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
conf.viewQuery = string(opt)
}
type useStandardSQL struct{}
// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }
func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
conf.useStandardSQL = true
}
// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
// (Optional) The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
}
func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
conf.timePartitioning = &opt
}
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
return newRowIterator(ctx, t.c.service, &readTableConf{
@ -223,7 +199,7 @@ func (t *Table) Read(ctx context.Context) *RowIterator {
}
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
var conf patchTableConf
if tm.Description != nil {
s := optional.ToString(tm.Description)
@ -234,7 +210,8 @@ func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMet
conf.Name = &s
}
conf.Schema = tm.Schema
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
conf.ExpirationTime = tm.ExpirationTime
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag)
}
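// A usage sketch of an ETag-guarded update (assuming ctx and a *Table named t):
//
//	md, err := t.Metadata(ctx)
//	if err != nil {
//		// handle err
//	}
//	md2, err := t.Update(ctx, TableMetadataToUpdate{Description: "new description"}, md.ETag)
//	// If the table's metadata changed since it was read, the If-Match check fails
//	// and the update returns an error.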
// TableMetadataToUpdate is used when updating a table's metadata.
@ -250,4 +227,7 @@ type TableMetadataToUpdate struct {
// When updating a schema, you can add columns but not remove them.
Schema Schema
// TODO(jba): support updating the view
// ExpirationTime is the time when this table expires.
ExpirationTime time.Time
}

View File

@ -15,10 +15,12 @@
package bigquery
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
)
@ -133,7 +135,7 @@ func TestInsertsData(t *testing.T) {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
@ -265,7 +267,7 @@ func TestValueSavers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
}
// Make sure Save is successful.

View File

@ -14,11 +14,6 @@
package bigquery
import (
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultGCS() *GCSReference {
return &GCSReference{
uris: []string{"uri"},
@ -30,18 +25,3 @@ var defaultQuery = &QueryConfig{
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
s.Job = conf.job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

View File

@ -18,12 +18,14 @@ import (
"encoding/base64"
"fmt"
"math"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -50,7 +52,7 @@ func TestConvertBasicValues(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{"a", int64(1), 1.2, true, []byte("foo")}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -92,7 +94,7 @@ func TestConvertNullValues(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{nil}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -123,7 +125,7 @@ func TestBasicRepetition(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{int64(1), int64(2), int64(3)}}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -160,7 +162,7 @@ func TestNestedRecordContainingRepetition(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -234,7 +236,7 @@ func TestRepeatedRecordContainingRepetition(t *testing.T) {
},
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -330,7 +332,7 @@ func TestRepeatedRecordContainingRecord(t *testing.T) {
},
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
}
}
@ -417,7 +419,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
t.Errorf("Expected successful save; got: %v", err)
}
got := &insertionRow{insertID, data}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
}
}
@ -458,7 +460,7 @@ func TestStructSaver(t *testing.T) {
if wantIID := "iid"; gotIID != wantIID {
t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
}
}
@ -523,7 +525,7 @@ func TestConvertRows(t *testing.T) {
if err != nil {
t.Fatalf("got %v, want nil", err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("\ngot %v\nwant %v", got, want)
}
}
@ -542,7 +544,7 @@ func TestValueList(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -551,7 +553,7 @@ func TestValueList(t *testing.T) {
if err := vl.Load(want, schema); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}
@ -588,7 +590,7 @@ func TestValueMap(t *testing.T) {
map[string]Value{"x": 5, "y": 6},
},
}
if !reflect.DeepEqual(vm, valueMap(want)) {
if !testutil.Equal(vm, valueMap(want)) {
t.Errorf("got\n%+v\nwant\n%+v", vm, want)
}
@ -669,7 +671,7 @@ func TestStructLoader(t *testing.T) {
Nested: nested{NestS: "nested", NestI: 17},
Tagged: "z",
}
if !reflect.DeepEqual(&ts1, want) {
if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) {
t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
d, _, err := pretty.Diff(*want, ts1)
if err == nil {
@ -684,7 +686,7 @@ func TestStructLoader(t *testing.T) {
t.Fatal(err)
}
want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
if !reflect.DeepEqual(&np, want2) {
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
}
@ -694,7 +696,7 @@ func TestStructLoader(t *testing.T) {
if err := load(&np, schema2, testValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(&np, want2) {
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
}
if np.Nested != nst {
@ -739,7 +741,7 @@ func TestStructLoaderRepeated(t *testing.T) {
LongNums: [...]int{1, 2, 3, 0, 0},
Nested: []*nested{{"x", 1}, {"y", 2}},
}
if !reflect.DeepEqual(r1, want) {
if !testutil.Equal(r1, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
}
@ -750,7 +752,7 @@ func TestStructLoaderRepeated(t *testing.T) {
if err := load(&r2, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(r2, want) {
if !testutil.Equal(r2, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
}
if got, want := cap(r2.Nums), 5; got != want {
@ -762,7 +764,7 @@ func TestStructLoaderRepeated(t *testing.T) {
if err := load(&r3, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(r3, want) {
if !testutil.Equal(r3, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
}
if got, want := cap(r3.Nums), 3; got != want {
@ -801,7 +803,7 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
t.Fatal(err)
}
want1 := S1{I: 7}
if !reflect.DeepEqual(s1, want1) {
if !testutil.Equal(s1, want1) {
t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
}
@ -813,7 +815,7 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
t.Fatal(err)
}
want2 := S2{}
if !reflect.DeepEqual(s2, want2) {
if !testutil.Equal(s2, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
}
}

View File

@ -26,10 +26,12 @@ import (
lroauto "cloud.google.com/go/longrunning/autogen"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/grpc/codes"
)
const adminAddr = "bigtableadmin.googleapis.com:443"
@ -52,7 +54,7 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -92,17 +94,18 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
return names, nil
}
// TableConf contains all of the information necessary to create a table with column families.
type TableConf struct {
TableID string
SplitKeys []string
// Families is a map from family name to GCPolicy
Families map[string]GCPolicy
}
// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
}
_, err := ac.tClient.CreateTable(ctx, req)
return err
return ac.CreateTableFromConf(ctx, &TableConf{TableID: table})
}
// CreatePresplitTable creates a new table in the instance.
@ -110,16 +113,29 @@ func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error {
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error {
return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys})
}
// CreateTableFromConf creates a new table in the instance from the given configuration.
func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
var req_splits []*btapb.CreateTableRequest_Split
for _, split := range split_keys {
for _, split := range conf.SplitKeys {
req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
}
ctx = mergeOutgoingMetadata(ctx, ac.md)
var tbl btapb.Table
if conf.Families != nil {
tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
for fam, policy := range conf.Families {
tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()}
}
}
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
TableId: conf.TableID,
Table: &tbl,
InitialSplits: req_splits,
}
_, err := ac.tClient.CreateTable(ctx, req)
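A minimal sketch of the new TableConf path above, mirroring the integration test later in this diff; ac (an *AdminClient) and ctx are assumed to be in scope.
conf := &TableConf{
	TableID: "conftable",
	Families: map[string]GCPolicy{
		"fam1": MaxVersionsPolicy(1),
		"fam2": MaxVersionsPolicy(2),
	},
}
if err := ac.CreateTableFromConf(ctx, conf); err != nil {
	// handle error
}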
@ -171,13 +187,13 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family str
// TableInfo represents information about a table.
type TableInfo struct {
// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
Families []string
Families []string
FamilyInfos []FamilyInfo
}
// FamilyInfo represents information about a column family.
type FamilyInfo struct {
Name string
Name string
GCPolicy string
}
@ -251,7 +267,7 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -297,6 +313,14 @@ func (st StorageType) proto() btapb.StorageType {
return btapb.StorageType_SSD
}
// InstanceType is the type of the instance
type InstanceType int32
const (
PRODUCTION InstanceType = InstanceType(btapb.Instance_PRODUCTION)
DEVELOPMENT = InstanceType(btapb.Instance_DEVELOPMENT)
)
// InstanceInfo represents information about an instance
type InstanceInfo struct {
Name string // name of the instance
@ -306,8 +330,10 @@ type InstanceInfo struct {
// InstanceConf contains the information necessary to create an Instance
type InstanceConf struct {
InstanceId, DisplayName, ClusterId, Zone string
NumNodes int32
StorageType StorageType
// NumNodes must not be specified for DEVELOPMENT instance types
NumNodes int32
StorageType StorageType
InstanceType InstanceType
}
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
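A hedged sketch of creating a DEVELOPMENT instance through the new InstanceType field; per the comment above, NumNodes is left unset for DEVELOPMENT, and SSD is assumed to be the package's existing StorageType constant.
err := iac.CreateInstance(ctx, &InstanceConf{
	InstanceId:   "dev-instance",
	DisplayName:  "Development instance",
	ClusterId:    "dev-cluster",
	Zone:         "us-central1-b",
	StorageType:  SSD,
	InstanceType: DEVELOPMENT, // development instances have no node count
})
if err != nil {
	// handle error
}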
@ -319,7 +345,7 @@ func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *Instan
req := &btapb.CreateInstanceRequest{
Parent: "projects/" + iac.project,
InstanceId: conf.InstanceId,
Instance: &btapb.Instance{DisplayName: conf.DisplayName},
Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
Clusters: map[string]*btapb.Cluster{
conf.ClusterId: {
ServeNodes: conf.NumNodes,
@ -355,6 +381,11 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
if err != nil {
return nil, err
}
if len(res.FailedLocations) > 0 {
// We don't have a good way to return a partial result in the face of some zones being unavailable.
// Fail the entire request.
return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations)
}
var is []*InstanceInfo
for _, i := range res.Instances {

View File

@ -21,6 +21,7 @@ import (
"fmt"
"golang.org/x/net/context"
"reflect"
"strings"
)
@ -107,6 +108,28 @@ func TestAdminIntegration(t *testing.T) {
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted)
}
tblConf := TableConf{
TableID: "conftable",
Families: map[string]GCPolicy{
"fam1": MaxVersionsPolicy(1),
"fam2": MaxVersionsPolicy(2),
},
}
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
t.Fatalf("Creating table from TableConf: %v", err)
}
defer adminClient.DeleteTable(ctx, tblConf.TableID)
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
if err != nil {
t.Fatalf("Getting table info: %v", err)
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !reflect.DeepEqual(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}
// Populate mytable and drop row ranges
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
t.Fatalf("Creating column family: %v", err)

View File

@ -28,7 +28,7 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -53,9 +53,15 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.Cli
return nil, err
}
// Default to a small connection pool that can be overridden.
o = append(o, option.WithGRPCConnectionPool(4))
o = append(o,
option.WithGRPCConnectionPool(4),
// Set the max size to correspond to server-side limits.
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
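The defaults above (a pool of 4 connections, 100 MB message limits, WithBlock) are appended before the caller's options, so they can be overridden; a minimal sketch with placeholder project and instance names.
client, err := NewClient(ctx, "my-project", "my-instance",
	option.WithGRPCConnectionPool(8)) // overrides the default pool of 4
if err != nil {
	// handle error
}
defer client.Close()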
@ -211,6 +217,7 @@ func decodeFamilyProto(r Row, row string, f *btpb.Family) {
}
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface {
proto() *btpb.RowSet
@ -391,6 +398,9 @@ type ReadOption interface {
}
// RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
type rowFilter struct{ f Filter }
@ -571,7 +581,7 @@ type entryErr struct {
Err error
}
// ApplyBulk applies multiple Mutations.
// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
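Since only the last RowFilter passed to a read is used, filters are combined explicitly; a hedged sketch using ChainFilters with the package's existing FamilyFilter and LatestNFilter constructors (tbl and ctx assumed in scope).
row, err := tbl.ReadRow(ctx, "testrow",
	RowFilter(ChainFilters(FamilyFilter("ts"), LatestNFilter(1))))
if err != nil {
	// handle error
}
_ = row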

View File

@ -448,6 +448,24 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
}
}
// Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator.
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
// Get only the correct row back on read.
r, err := tbl.ReadRow(ctx, "issue-723-1")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if r.Key() != "issue-723-1" {
t.Errorf("ApplyReadModifyWrite: incorrect read after RMW,\n got %v\nwant %v", r.Key(), "issue-723-1")
}
checkpoint("tested ReadModifyWrite")
// Test arbitrary timestamps more thoroughly.
@ -460,11 +478,12 @@ func TestClientIntegration(t *testing.T) {
// Timestamps are used in thousands because the server
// only permits that granularity.
mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
mut.Set("ts", "col2", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
}
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
r, err := tbl.ReadRow(ctx, "testrow")
r, err = tbl.ReadRow(ctx, "testrow")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
@ -474,6 +493,10 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
@ -486,10 +509,39 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
}
// Check cell offset / limit
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
}
// Check timestamp range filtering (with truncation)
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000)))
if err != nil {
@ -498,6 +550,8 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
@ -510,6 +564,9 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
@ -559,6 +616,8 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
@ -717,7 +776,7 @@ func TestClientIntegration(t *testing.T) {
checkpoint("tested high concurrency")
// Large reads, writes and scans.
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
bigBytes := make([]byte, 5<<20) // 5 MB is larger than current default gRPC max of 4 MB, but less than the max we set.
nonsense := []byte("lorem ipsum dolor sit amet, ")
fill(bigBytes, nonsense)
mut = NewMutation()

View File

@ -277,7 +277,6 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
// Rows to read can be specified by a set of row keys and/or a set of row ranges.
// Output is a stream of sorted, de-duped rows.
tbl.mu.RLock()
rowSet := make(map[string]*row)
if req.Rows != nil {
// Add the explicitly given keys
@ -459,6 +458,38 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
if !rx.MatchString(r.key) {
return false
}
case *btpb.RowFilter_CellsPerRowLimitFilter:
// Grab the first n cells in the row.
lim := int(f.CellsPerRowLimitFilter)
for _, fam := range r.families {
for _, col := range fam.colNames {
cs := fam.cells[col]
if len(cs) > lim {
fam.cells[col] = cs[:lim]
lim = 0
} else {
lim -= len(cs)
}
}
}
return true
case *btpb.RowFilter_CellsPerRowOffsetFilter:
// Skip the first n cells in the row.
offset := int(f.CellsPerRowOffsetFilter)
for _, fam := range r.families {
for _, col := range fam.colNames {
cs := fam.cells[col]
if len(cs) > offset {
fam.cells[col] = cs[offset:]
offset = 0
return true
} else {
fam.cells[col] = cs[:0]
offset -= len(cs)
}
}
}
return true
}
// Any other case, operate on a per-cell basis.
@ -592,9 +623,8 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
defer r.mu.Unlock()
@ -611,14 +641,13 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
fs := tbl.columnFamilies()
defer tbl.resortRowIndex()
for i, entry := range req.Entries {
r := tbl.mutableRow(string(entry.RowKey))
r, _ := tbl.mutableRow(string(entry.RowKey))
r.mu.Lock()
code, msg := int32(codes.OK), ""
if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
@ -642,12 +671,11 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.CheckAndMutateRowResponse{}
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
@ -798,12 +826,16 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
updates := make(map[string]cell) // copy of updated cells; keyed by full column name
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
rowKey := string(req.RowKey)
r, isNewRow := tbl.mutableRow(rowKey)
// This must be done before the row lock, acquired below, is released.
if isNewRow {
defer tbl.resortRowIndex()
}
r.mu.Lock()
defer r.mu.Unlock()
// Assume all mutations apply to the most recent version of the cell.
@ -993,13 +1025,13 @@ func (t *table) columnFamilies() map[string]*columnFamily {
return cp
}
func (t *table) mutableRow(row string) *row {
func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
// Try fast path first.
t.mu.RLock()
r := t.rowIndex[row]
t.mu.RUnlock()
if r != nil {
return r
return r, false
}
// We probably need to create the row.
@ -1011,7 +1043,7 @@ func (t *table) mutableRow(row string) *row {
t.rows = append(t.rows, r)
}
t.mu.Unlock()
return r
return r, true
}
func (t *table) resortRowIndex() {

View File

@ -36,6 +36,7 @@ import (
"cloud.google.com/go/bigtable/internal/stat"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
var (
@ -74,7 +75,12 @@ func main() {
var options []option.ClientOption
if *poolSize > 1 {
options = append(options, option.WithGRPCConnectionPool(*poolSize))
options = append(options,
option.WithGRPCConnectionPool(*poolSize),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
}
var csvFile *os.File

View File

@ -98,7 +98,7 @@ type EmulatedEnv struct {
// NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
srv, err := bttest.NewServer("127.0.0.1:0")
srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
if err != nil {
return nil, err
}
@ -152,7 +152,7 @@ func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
func (e *EmulatedEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
if err != nil {
return nil, err
}

View File

@ -285,4 +285,34 @@ func (cf conditionFilter) proto() *btpb.RowFilter {
}}}
}
// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells.
func CellsPerRowOffsetFilter(n int) Filter {
return cellsPerRowOffsetFilter(n)
}
type cellsPerRowOffsetFilter int32
func (cof cellsPerRowOffsetFilter) String() string {
return fmt.Sprintf("cells_per_row_offset(%d)", cof)
}
func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}}
}
// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
func CellsPerRowLimitFilter(n int) Filter {
return cellsPerRowLimitFilter(n)
}
type cellsPerRowLimitFilter int32
func (clf cellsPerRowLimitFilter) String() string {
return fmt.Sprintf("cells_per_row_limit(%d)", clf)
}
func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}}
}
// TODO(dsymonds): More filters: sampling
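A minimal sketch of the two new filters, matching how the integration test earlier in this diff exercises them; tbl and ctx are assumed to be in scope.
// First three cells of the row, in server-side order.
first, err := tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3)))
if err != nil {
	// handle error
}
// Everything after the first three cells.
rest, err := tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
if err != nil {
	// handle error
}
_, _ = first, rest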

View File

@ -16,9 +16,10 @@ package civil
import (
"encoding/json"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
func TestDates(t *testing.T) {
@ -418,7 +419,7 @@ func TestUnmarshalJSON(t *testing.T) {
if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil {
t.Fatalf("%s: %v", test.data, err)
}
if !reflect.DeepEqual(test.ptr, test.want) {
if !cmp.Equal(test.ptr, test.want) {
t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want)
}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build linux,go1.7
package main

View File

@ -28,7 +28,7 @@ import (
cd "google.golang.org/api/clouddebugger/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
)
const (
@ -92,7 +92,7 @@ type serviceInterface interface {
}
var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) {
httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource))
httpClient, endpoint, err := htransport.NewClient(ctx, option.WithTokenSource(tokenSource))
if err != nil {
return nil, err
}

View File

@ -25,7 +25,7 @@ import (
"golang.org/x/net/context"
raw "google.golang.org/api/container/v1"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
)
type Type string
@ -64,7 +64,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}

118
vendor/cloud.google.com/go/datastore/client.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
gax "github.com/googleapis/gax-go"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Lookup(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.RunQuery(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.BeginTransaction(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Commit(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Rollback(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.AllocateIds(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error {
ctx = metadata.NewOutgoingContext(ctx, dc.md)
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = f(ctx)
return !shouldRetry(err), err
})
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
s, ok := status.FromError(err)
if !ok {
return false
}
// See https://cloud.google.com/datastore/docs/concepts/errors.
return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded
}

View File

@ -21,15 +21,11 @@ import (
"os"
"reflect"
"cloud.google.com/go/internal/version"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
@ -44,56 +40,6 @@ const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
Call(context.Context, string, proto.Message, proto.Message) error
}
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
return dc.c.Lookup(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return dc.c.RunQuery(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
return dc.c.BeginTransaction(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
return dc.c.Commit(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
return dc.c.Rollback(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
return dc.c.AllocateIds(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
@ -138,7 +84,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -201,7 +147,6 @@ func keyToProto(k *Key) *pb.Key {
return nil
}
// TODO(jbd): Eliminate unrequired allocations.
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.Kind}
@ -210,12 +155,19 @@ func keyToProto(k *Key) *pb.Key {
} else if k.Name != "" {
el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
}
path = append([]*pb.Key_PathElement{el}, path...)
path = append(path, el)
if k.Parent == nil {
break
}
k = k.Parent
}
// The path should be in order [grandparent, parent, child]
// We did it backward above, so reverse back.
for i := 0; i < len(path)/2; i++ {
path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
}
key := &pb.Key{Path: path}
if k.Namespace != "" {
key.PartitionId = &pb.PartitionId{
@ -390,17 +342,21 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
return nil
}
// Go through keys, validate them, serialize them, and create a dict mapping them to their index
// Go through keys, validate them, serialize them, and create a dict mapping them to their indices.
// Equal keys are deduped.
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string]int)
pbKeys := make([]*pb.Key, len(keys))
keyMap := make(map[string][]int, len(keys))
pbKeys := make([]*pb.Key, 0, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
keyMap[k.String()] = i
pbKeys[i] = keyToProto(k)
ks := k.String()
if _, ok := keyMap[ks]; !ok {
pbKeys = append(pbKeys, keyToProto(k))
}
keyMap[ks] = append(keyMap[ks], i)
}
}
if any {
@ -434,25 +390,26 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
found = append(found, resp.Found...)
missing = append(missing, resp.Missing...)
}
if len(keys) != len(found)+len(missing) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
filled := 0
for _, e := range found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
index := keyMap[k.String()]
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
}
for _, e := range missing {
@ -460,9 +417,17 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
multiErr[keyMap[k.String()]] = ErrNoSuchEntity
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
multiErr[index] = ErrNoSuchEntity
}
any = true
}
if filled != len(keys) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
if any {
return multiErr
}
@ -592,13 +557,18 @@ func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
set := make(map[string]bool, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
ks := k.String()
if !set[ks] {
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
}
set[ks] = true
}
return mutations, nil
}

View File

@ -170,6 +170,7 @@ func TestGetMulti(t *testing.T) {
{key: NameKey("X", "item1", p), put: true},
{key: NameKey("X", "item2", p), put: false},
{key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item4", p), put: true},
}
@ -1003,6 +1004,8 @@ func TestNilPointers(t *testing.T) {
t.Errorf("Get: err %v; want %v", err, want)
}
// Test that deleting with duplicate keys works.
keys = append(keys, keys...)
if err := client.DeleteMulti(ctx, keys); err != nil {
t.Errorf("Delete: %v", err)
}

View File

@ -88,10 +88,10 @@ type Controller2Client struct {
//
// The debugger agents register with the Controller to identify the application
// being debugged, the Debuggee. All agents that register with the same data,
// represent the same Debuggee, and are assigned the same `debuggee_id`.
// represent the same Debuggee, and are assigned the same debuggee_id.
//
// The debugger agents call the Controller to retrieve the list of active
// Breakpoints. Agents with the same `debuggee_id` get the same breakpoints
// Breakpoints. Agents with the same debuggee_id get the same breakpoints
// list. An agent that can fulfill the breakpoint request updates the
// Controller with the breakpoint result. The controller selects the first
// result received and discards the rest of the results.
@ -139,14 +139,14 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// RegisterDebuggee registers the debuggee with the controller service.
//
// All agents attached to the same application should call this method with
// the same request content to get back the same stable `debuggee_id`. Agents
// should call this method again whenever `google.rpc.Code.NOT_FOUND` is
// returned from any controller method.
// All agents attached to the same application must call this method with
// exactly the same request content to get back the same stable debuggee_id.
// Agents should call this method again whenever google.rpc.Code.NOT_FOUND
// is returned from any controller method.
//
// This allows the controller service to disable the agent or recover from any
// data loss. If the debuggee is disabled by the server, the response will
// have `is_disabled` set to `true`.
// This protocol allows the controller service to disable debuggees, recover
// from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
@ -164,7 +164,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee.
//
// The breakpoint specification (location, condition, and expression
// The breakpoint specification (location, condition, and expressions
// fields) is semantically immutable, although the field values may
// change. For example, an agent may update the location line number
// to reflect the actual line where the breakpoint was set, but this
@ -191,12 +191,11 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
}
// UpdateActiveBreakpoint updates the breakpoint state or mutable fields.
// The entire Breakpoint message must be sent back to the controller
// service.
// The entire Breakpoint message must be sent back to the controller service.
//
// Updates to active breakpoint fields are only allowed if the new value
// does not change the breakpoint specification. Updates to the `location`,
// `condition` and `expression` fields should not alter the breakpoint
// does not change the breakpoint specification. Updates to the location,
// condition and expressions fields should not alter the breakpoint
// semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {

View File

@ -91,9 +91,9 @@ type Debugger2Client struct {
// and without modifying its state. An application may include one or
// more replicated processes performing the same work.
//
// The application is represented using the Debuggee concept. The Debugger
// service provides a way to query for available Debuggees, but does not
// provide a way to create one. A debuggee is created using the Controller
// A debugged application is represented using the Debuggee concept. The
// Debugger service provides a way to query for available debuggees, but does
// not provide a way to create one. A debuggee is created using the Controller
// service, usually by running a debugger agent with the application.
//
// The Debugger service enables the client to set one or more Breakpoints on a
@ -193,7 +193,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
return resp, nil
}
// ListDebuggees lists all the debuggees that the user can set breakpoints to.
// ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)

View File

@ -35,8 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -0,0 +1,77 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestDlpServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var minLikelihood dlppb.Likelihood = dlppb.Likelihood_POSSIBLE
var inspectConfig = &dlppb.InspectConfig{
MinLikelihood: minLikelihood,
}
var type_ string = "text/plain"
var value string = "my phone number is 215-512-1212"
var itemsElement = &dlppb.ContentItem{
Type: type_,
DataItem: &dlppb.ContentItem_Value{
Value: value,
},
}
var items = []*dlppb.ContentItem{itemsElement}
var request = &dlppb.InspectContentRequest{
InspectConfig: inspectConfig,
Items: items,
}
if _, err := c.InspectContent(ctx, request); err != nil {
t.Error(err)
}
}

327
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
"time"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
InspectContent []gax.CallOption
RedactContent []gax.CallOption
CreateInspectOperation []gax.CallOption
ListInspectFindings []gax.CallOption
ListInfoTypes []gax.CallOption
ListRootCategories []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("dlp.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}
func defaultCallOptions() *CallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &CallOptions{
InspectContent: retry[[2]string{"default", "non_idempotent"}],
RedactContent: retry[[2]string{"default", "non_idempotent"}],
CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
ListInspectFindings: retry[[2]string{"default", "idempotent"}],
ListInfoTypes: retry[[2]string{"default", "idempotent"}],
ListRootCategories: retry[[2]string{"default", "idempotent"}],
}
}
// Client is a client for interacting with DLP API.
type Client struct {
// The connection to the service.
conn *grpc.ClientConn
// The gRPC API client.
client dlppb.DlpServiceClient
// LROClient is used internally to handle longrunning operations.
// It is exposed so that its CallOptions can be modified if required.
// Users should not Close this client.
LROClient *lroauto.OperationsClient
// The call options for this service.
CallOptions *CallOptions
// The metadata to be sent with each request.
xGoogHeader []string
}
// NewClient creates a new dlp service client.
//
// The DLP API is a service that allows clients
// to detect the presence of Personally Identifiable Information (PII) and other
// privacy-sensitive data in user-supplied, unstructured data streams, like text
// blocks or images.
// The service also includes methods for sensitive data redaction and
// scheduling of data scans on Google Cloud Platform based data sets.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &Client{
conn: conn,
CallOptions: defaultCallOptions(),
client: dlppb.NewDlpServiceClient(conn),
}
c.setGoogleClientInfo()
c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
if err != nil {
// This error "should not happen", since we are just reusing old connection
// and never actually need to dial.
// If this does happen, we could leak conn. However, we cannot close conn:
// If the user invoked the function with option.WithGRPCConn,
// we would close a connection that's still in use.
// TODO(pongad): investigate error conditions.
return nil, err
}
return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// ResultPath returns the path for the result resource.
func ResultPath(result string) string {
return "" +
"inspect/results/" +
result +
""
}
// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
var resp *dlppb.InspectContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
var resp *dlppb.RedactContentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.RedactContent(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
// repository.
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
var resp *longrunningpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return &CreateInspectOperationHandle{
lro: longrunning.InternalNewOperation(c.LROClient, resp),
}, nil
}
// ListInspectFindings returns list of results for given inspect operation result set id.
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
var resp *dlppb.ListInspectFindingsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListInfoTypes returns the sensitive information types for a given category.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
var resp *dlppb.ListInfoTypesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// ListRootCategories returns the list of root categories of sensitive information.
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
var resp *dlppb.ListRootCategoriesResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
type CreateInspectOperationHandle struct {
lro *longrunning.Operation
}
// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name.
// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process.
func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle {
return &CreateInspectOperationHandle{
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
}
}
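// Illustrative sketch: resuming an operation started elsewhere, given a name
// previously obtained from op.Name() (savedName below is a placeholder).
//
//	op := c.CreateInspectOperationHandle(savedName)
//	result, err := op.Wait(ctx)
//	if err != nil {
//		// Handle the RPC error or the operation's own failure.
//	}
//	_ = result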
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
var resp dlppb.InspectOperationResult
if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
return nil, err
}
return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
var resp dlppb.InspectOperationResult
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
return nil, err
}
if !op.Done() {
return nil, nil
}
return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) {
var meta dlppb.InspectOperationMetadata
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
return nil, nil
} else if err != nil {
return nil, err
}
return &meta, nil
}
// Done reports whether the long-running operation has completed.
func (op *CreateInspectOperationHandle) Done() bool {
return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateInspectOperationHandle) Name() string {
return op.lro.Name()
}
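// pollInspectOperation is an illustrative sketch, not generated code: it shows
// one way a caller might drive Poll, Done and Metadata by hand instead of
// calling Wait. The 10 second interval is an arbitrary assumption (Wait polls
// every 45 seconds).
func pollInspectOperation(ctx context.Context, op *CreateInspectOperationHandle) (*dlppb.InspectOperationResult, error) {
	for {
		resp, err := op.Poll(ctx)
		if err != nil {
			// Either the poll itself failed or the operation completed with an error.
			return nil, err
		}
		if op.Done() {
			// On success Done reports true and resp holds the InspectOperationResult.
			return resp, nil
		}
		if meta, err := op.Metadata(); err == nil && meta != nil {
			// Progress fields (for example bytes processed) could be inspected here.
			_ = meta
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(10 * time.Second):
		}
	}
}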

View File

@ -0,0 +1,146 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp_test
import (
"cloud.google.com/go/dlp/apiv2beta1"
"golang.org/x/net/context"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
func ExampleNewClient() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use client.
_ = c
}
func ExampleClient_InspectContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.InspectContentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.InspectContent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_RedactContent() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.RedactContentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.RedactContent(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_CreateInspectOperation() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.CreateInspectOperationRequest{
// TODO: Fill request struct fields.
}
op, err := c.CreateInspectOperation(ctx, req)
if err != nil {
// TODO: Handle error.
}
resp, err := op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListInspectFindings() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.ListInspectFindingsRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListInspectFindings(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListInfoTypes() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.ListInfoTypesRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListInfoTypes(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_ListRootCategories() {
ctx := context.Background()
c, err := dlp.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &dlppb.ListRootCategoriesRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ListRootCategories(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
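// The generated examples above leave the request structs empty. The sketch
// below is an unofficial illustration of a filled InspectContentRequest; the
// InfoType and ContentItem field names are assumptions about the v2beta1
// protos and may need adjusting against the dlppb package.
func ExampleClient_InspectContent_filledRequest() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.InspectContentRequest{
		InspectConfig: &dlppb.InspectConfig{
			// Assumed field names: InfoTypes on InspectConfig, Name on InfoType.
			InfoTypes: []*dlppb.InfoType{{Name: "EMAIL_ADDRESS"}},
		},
		Items: []*dlppb.ContentItem{
			{
				// Assumed field names: Type and the Value oneof wrapper on ContentItem.
				Type:     "text/plain",
				DataItem: &dlppb.ContentItem_Value{Value: "My email is test@example.com."},
			},
		},
	}
	resp, err := c.InspectContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}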

42
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package dlp is an experimental, auto-generated package for the
// DLP API.
//
// The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform
// storage repositories.
package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
func insertXGoog(ctx context.Context, val []string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md["x-goog-api-client"] = val
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",
}
}
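// Illustrative only: a caller supplying its own client options can reuse the
// scopes above, for example (option here refers to google.golang.org/api/option,
// which this file does not import):
//
//	c, err := dlp.NewClient(ctx, option.WithScopes(DefaultAuthScopes()...))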

556
vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go generated vendored Normal file
View File

@ -0,0 +1,556 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
import (
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
type mockDlpServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
// in the future.
dlppb.DlpServiceServer
reqs []proto.Message
// If set, all calls return this error.
err error
// responses to return if err == nil
resps []proto.Message
}
func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.InspectContentResponse), nil
}
func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.RedactContentResponse), nil
}
func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*longrunningpb.Operation), nil
}
func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil
}
func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.ListInfoTypesResponse), nil
}
func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption
var (
mockDlp mockDlpServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
dlppb.RegisterDlpServiceServer(serv, &mockDlp)
lis, err := net.Listen("tcp", "localhost:0")
if err != nil {
log.Fatal(err)
}
go serv.Serve(lis)
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
if err != nil {
log.Fatal(err)
}
clientOpt = option.WithGRPCConn(conn)
os.Exit(m.Run())
}
func TestDlpServiceInspectContent(t *testing.T) {
var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.InspectContentRequest{
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.InspectContent(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceInspectContentError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var request = &dlppb.InspectContentRequest{
InspectConfig: inspectConfig,
Items: items,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.InspectContent(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceRedactContent(t *testing.T) {
var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
var request = &dlppb.RedactContentRequest{
InspectConfig: inspectConfig,
Items: items,
ReplaceConfigs: replaceConfigs,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.RedactContent(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceRedactContentError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var items []*dlppb.ContentItem = nil
var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
var request = &dlppb.RedactContentRequest{
InspectConfig: inspectConfig,
Items: items,
ReplaceConfigs: replaceConfigs,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.RedactContent(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceCreateInspectOperation(t *testing.T) {
var name string = "name3373707"
var expectedResponse = &dlppb.InspectOperationResult{
Name: name,
}
mockDlp.err = nil
mockDlp.reqs = nil
any, err := ptypes.MarshalAny(expectedResponse)
if err != nil {
t.Fatal(err)
}
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Response{Response: any},
})
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
var request = &dlppb.CreateInspectOperationRequest{
InspectConfig: inspectConfig,
StorageConfig: storageConfig,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.CreateInspectOperation(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceCreateInspectOperationError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = nil
mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
Name: "longrunning-test",
Done: true,
Result: &longrunningpb.Operation_Error{
Error: &status.Status{
Code: int32(errCode),
Message: "test error",
},
},
})
var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
var request = &dlppb.CreateInspectOperationRequest{
InspectConfig: inspectConfig,
StorageConfig: storageConfig,
OutputConfig: outputConfig,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
respLRO, err := c.CreateInspectOperation(context.Background(), request)
if err != nil {
t.Fatal(err)
}
resp, err := respLRO.Wait(context.Background())
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceListInspectFindings(t *testing.T) {
var nextPageToken string = "nextPageToken-1530815211"
var expectedResponse = &dlppb.ListInspectFindingsResponse{
NextPageToken: nextPageToken,
}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var formattedName string = ResultPath("[RESULT]")
var request = &dlppb.ListInspectFindingsRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListInspectFindings(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceListInspectFindingsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var formattedName string = ResultPath("[RESULT]")
var request = &dlppb.ListInspectFindingsRequest{
Name: formattedName,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListInspectFindings(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceListInfoTypes(t *testing.T) {
var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var category string = "category50511102"
var languageCode string = "languageCode-412800396"
var request = &dlppb.ListInfoTypesRequest{
Category: category,
LanguageCode: languageCode,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListInfoTypes(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceListInfoTypesError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var category string = "category50511102"
var languageCode string = "languageCode-412800396"
var request = &dlppb.ListInfoTypesRequest{
Category: category,
LanguageCode: languageCode,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListInfoTypes(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestDlpServiceListRootCategories(t *testing.T) {
var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{}
mockDlp.err = nil
mockDlp.reqs = nil
mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)
var languageCode string = "languageCode-412800396"
var request = &dlppb.ListRootCategoriesRequest{
LanguageCode: languageCode,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListRootCategories(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestDlpServiceListRootCategoriesError(t *testing.T) {
errCode := codes.PermissionDenied
mockDlp.err = gstatus.Error(errCode, "test error")
var languageCode string = "languageCode-412800396"
var request = &dlppb.ListRootCategoriesRequest{
LanguageCode: languageCode,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ListRootCategories(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}

View File

@ -20,6 +20,8 @@
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors.
//
// Use the client at cloud.google.com/go/errorreporting in preference to this.
package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1"
import (
@ -34,8 +36,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -29,10 +29,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
errorGroupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}")
)
// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.
type ErrorGroupCallOptions struct {
GetGroup []gax.CallOption
@ -122,14 +118,12 @@ func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
// ErrorGroupGroupPath returns the path for the group resource.
func ErrorGroupGroupPath(project, group string) string {
path, err := errorGroupGroupPathTemplate.Render(map[string]string{
"project": project,
"group": group,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/groups/" +
group +
""
}
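// For illustration, the concatenation above yields the same string the removed
// path template produced, e.g.
//
//	ErrorGroupGroupPath("my-project", "my-group") // "projects/my-project/groups/my-group"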
// GetGroup gets the specified group.

View File

@ -31,10 +31,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
errorStatsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.
type ErrorStatsCallOptions struct {
ListGroupStats []gax.CallOption
@ -127,13 +123,10 @@ func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
// ErrorStatsProjectPath returns the path for the project resource.
func ErrorStatsProjectPath(project string) string {
path, err := errorStatsProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// ListGroupStats lists the specified groups.

View File

@ -26,10 +26,6 @@ import (
"google.golang.org/grpc"
)
var (
reportErrorsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.
type ReportErrorsCallOptions struct {
ReportErrorEvent []gax.CallOption
@ -104,13 +100,10 @@ func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
// ReportErrorsProjectPath returns the path for the project resource.
func ReportErrorsProjectPath(project string) string {
path, err := reportErrorsProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// ReportErrorEvent reports an individual error event.
@ -119,8 +112,7 @@ func ReportErrorsProjectPath(project string) string {
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)

View File

@ -0,0 +1,215 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
type fakeLogger struct {
entry *logging.Entry
fail bool
}
func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
if c.fail {
return errors.New("request failed")
}
c.entry = &e
return nil
}
func (c *fakeLogger) Close() error {
return nil
}
func newTestClientUsingLogging(c *fakeLogger) *Client {
newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
func TestCatchNothingUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func entryMessage(e *logging.Entry) string {
return e.Payload.(map[string]interface{})["message"].(string)
}
func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
t.Errorf("error report didn't contain service name")
}
if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(entryMessage(e), "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(entryMessage(e), panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}
func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

456
vendor/cloud.google.com/go/errorreporting/errors.go generated vendored Normal file
View File

@ -0,0 +1,456 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package errorreporting is a Google Stackdriver Error Reporting library.
//
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errorreporting"
// ...
// errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting"
import (
"bytes"
"fmt"
"log"
"net/http"
"runtime"
"strings"
"time"
api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/logging"
"github.com/golang/protobuf/ptypes/timestamp"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const (
userAgent = `gcloud-golang-errorreporting/20160701`
)
type apiInterface interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
client, err := api.NewReportErrorsClient(ctx, opts...)
if err != nil {
return nil, err
}
client.SetGoogleClientInfo("gccl", version.Repo)
return client, nil
}
type loggerInterface interface {
LogSync(ctx context.Context, e logging.Entry) error
Close() error
}
type logger struct {
*logging.Logger
c *logging.Client
}
func (l logger) Close() error {
return l.c.Close()
}
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
lc, err := logging.NewClient(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
l := lc.Logger("errorreports")
return logger{l, lc}, nil
}
type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}
// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
if err != nil {
return nil, fmt.Errorf("creating Logging client: %v", err)
}
sender := &loggingSender{
logger: l,
projectID: projectID,
serviceContext: map[string]string{
"service": serviceName,
},
}
if serviceVersion != "" {
sender.serviceContext["version"] = serviceVersion
}
c := &Client{
sender: sender,
RepanicDefault: true,
}
return c, nil
} else {
a, err := newApiInterface(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("creating Error Reporting client: %v", err)
}
c := &Client{
sender: &errorApiSender{
apiClient: a,
projectID: "projects/" + projectID,
serviceContext: erpb.ServiceContext{
Service: serviceName,
Version: serviceVersion,
},
},
RepanicDefault: true,
}
return c, nil
}
}
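// Illustrative sketch (not part of this file): typical initialization, reporting
// via the Error Reporting API directly (useLogging=false) and deferring Catch in
// an HTTP handler. projectID and the handler body are placeholders.
//
//	errorsClient, err := errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer errorsClient.Close()
//
//	http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
//		defer errorsClient.Catch(r.Context(), errorreporting.WithRequest(r))
//		// ... handler body that may panic ...
//	})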
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
err := c.sender.close()
c.sender = nil
return err
}
// An Option is an optional argument to Catch.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
}
func (h panicFlag) isOption() {}
// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
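// Options compose; an illustrative call combining several of them (r and userID
// are placeholders):
//
//	defer errorsClient.Catch(ctx,
//		errorreporting.WithRequest(r),
//		errorreporting.WithMessagef("while handling user %d", userID),
//		errorreporting.Repanic(false))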
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
panicked := false
for _, o := range opt {
switch o := o.(type) {
case panicFlag:
panicked = panicked || o.bool != nil && *o.bool
}
}
x := recover()
if x == nil && !panicked {
return
}
var (
r *http.Request
shouldRepanic = true
messages = []string{fmt.Sprint(x)}
)
if c != nil {
shouldRepanic = c.RepanicDefault
}
for _, o := range opt {
switch o := o.(type) {
case repanic:
shouldRepanic = bool(o)
case withRequest:
r = o.Request
case message:
messages = append(messages, fmt.Sprint(o...))
case messagef:
messages = append(messages, fmt.Sprintf(o.format, o.v...))
}
}
c.logInternal(ctx, r, true, strings.Join(messages, " "))
if shouldRepanic {
panic(x)
}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprint(v...))
}
// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
// limit the stack trace to 16k.
var buf [16384]byte
stack := buf[0:runtime.Stack(buf[:], false)]
message := msg + "\n" + chopStack(stack, isPanic)
if c == nil {
log.Println("Error report used nil client:", message)
return
}
c.send(ctx, r, message)
}
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
payload := map[string]interface{}{
"eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano),
"message": message,
"serviceContext": s.serviceContext,
}
if r != nil {
payload["context"] = map[string]interface{}{
"httpRequest": map[string]interface{}{
"method": r.Method,
"url": r.Host + r.RequestURI,
"userAgent": r.UserAgent(),
"referrer": r.Referer(),
"remoteIp": r.RemoteAddr,
},
}
}
e := logging.Entry{
Severity: logging.Error,
Payload: payload,
}
err := s.logger.LogSync(ctx, e)
if err != nil {
log.Println("Error writing error report:", err, "report:", payload)
}
}
func (s *loggingSender) close() error {
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
time := time.Now()
var errorContext *erpb.ErrorContext
if r != nil {
errorContext = &erpb.ErrorContext{
HttpRequest: &erpb.HttpRequestContext{
Method: r.Method,
Url: r.Host + r.RequestURI,
UserAgent: r.UserAgent(),
Referrer: r.Referer(),
RemoteIp: r.RemoteAddr,
},
}
}
req := erpb.ReportErrorEventRequest{
ProjectName: s.projectID,
Event: &erpb.ReportedErrorEvent{
EventTime: &timestamp.Timestamp{
Seconds: time.Unix(),
Nanos: int32(time.Nanosecond()),
},
ServiceContext: &s.serviceContext,
Message: message,
Context: errorContext,
},
}
_, err := s.apiClient.ReportErrorEvent(ctx, &req)
if err != nil {
log.Println("Error writing error report:", err, "report:", message)
}
}
func (s *errorApiSender) close() error {
return s.apiClient.Close()
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
var f []byte
if isPanic {
f = []byte("panic(")
} else {
f = []byte("cloud.google.com/go/errorreporting.(*Client).Report")
}
lfFirst := bytes.IndexByte(s, '\n')
if lfFirst == -1 {
return string(s)
}
stack := s[lfFirst:]
panicLine := bytes.Index(stack, f)
if panicLine == -1 {
return string(s)
}
stack = stack[panicLine+1:]
for i := 0; i < 2; i++ {
nextLine := bytes.IndexByte(stack, '\n')
if nextLine == -1 {
return string(s)
}
stack = stack[nextLine+1:]
}
return string(s[:lfFirst+1]) + string(stack)
}

View File

@ -0,0 +1,212 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const testProjectID = "testproject"
type fakeReportErrorsClient struct {
req *erpb.ReportErrorEventRequest
fail bool
}
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
if c.fail {
return nil, errors.New("request failed")
}
c.req = req
return &erpb.ReportErrorEventResponse{}, nil
}
func (c *fakeReportErrorsClient) Close() error {
return nil
}
func newTestClient(c *fakeReportErrorsClient) *Client {
newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
return c, nil
}
t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
if err != nil {
panic(err)
}
t.RepanicDefault = false
return t
}
var ctx context.Context
func init() {
ctx = context.Background()
}
func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name")
}
if req.Event.ServiceContext.Version != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(req.Event.Message, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(req.Event.Message, panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error")
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReport")
}
func TestReportf(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReportf")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

118
vendor/cloud.google.com/go/errorreporting/stack_test.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import "testing"
func TestChopStack(t *testing.T) {
for _, test := range []struct {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
runtime/debug.Stack()
/gopath/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Report()
/gopath/cloud.google.com/go/errorreporting/errors.go:248 +0x4ed
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`),
expected: ` goroutine 39 [running]:
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
}
}
}

View File

@ -204,3 +204,12 @@ func TestReportfUsingLogging(t *testing.T) {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@ -18,14 +18,7 @@
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function. Generally you will want
// to do this on program initialization. The NewClient function takes as
// arguments a context, the project name, a service name, and a version string.
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty. NewClient
// also takes a bool that indicates whether to report errors using Stackdriver
// Logging, which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errors"
// ...
@ -76,6 +69,8 @@
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
//
// Deprecated: Use cloud.google.com/go/errorreporting instead.
package errors // import "cloud.google.com/go/errors"
import (
@ -155,9 +150,9 @@ type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
client *logging.Client
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
@ -166,6 +161,16 @@ type Client struct {
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
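Taken together with the package comment above, the new NewClient documentation describes the whole initialization flow. As a hedged sketch only (the project ID, service name, and version below are placeholders, not values from this commit), a caller of this deprecated package would wire it up roughly like this:

package main

import (
	"log"

	"cloud.google.com/go/errors"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	// useLogging=true also mirrors reports to Stackdriver Logging.
	errc, err := errors.NewClient(ctx, "my-project", "my-service", "v1.0", true)
	if err != nil {
		log.Fatal(err)
	}
	// Report panics in this goroutine, as described in the package comment.
	defer errc.Catch(ctx)
	// ... application code ...
}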
@ -383,7 +388,7 @@ func (s *loggingSender) send(ctx context.Context, r *http.Request, message strin
}
func (s *loggingSender) close() error {
return s.client.Close()
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {

View File

@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -32,12 +32,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
iamProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
iamServiceAccountPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}")
iamKeyPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}/keys/{key}")
)
// IamCallOptions contains the retry settings for each method of IamClient.
type IamCallOptions struct {
ListServiceAccounts []gax.CallOption
@ -119,16 +113,16 @@ type IamClient struct {
// of to an individual end user. It is used to authenticate calls
// to a Google API.
//
// To create a service account, specify the `project_id` and `account_id`
// for the account. The `account_id` is unique within the project, and used
// To create a service account, specify the project_id and account_id
// for the account. The account_id is unique within the project, and used
// to generate the service account email address and a stable
// `unique_id`.
// unique_id.
//
// All other methods can identify accounts using the format
// `projects/{project}/serviceAccounts/{account}`.
// Using `-` as a wildcard for the project will infer the project from
// the account. The `account` value can be the `email` address or the
// `unique_id` of the service account.
// projects/{project}/serviceAccounts/{account}.
// Using - as a wildcard for the project will infer the project from
// the account. The account value can be the email address or the
// unique_id of the service account.
func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...)
if err != nil {
@ -140,7 +134,7 @@ func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient,
iamClient: adminpb.NewIAMClient(conn),
}
c.SetGoogleClientInfo()
c.setGoogleClientInfo()
return c, nil
}
@ -155,10 +149,10 @@ func (c *IamClient) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
func (c *IamClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -166,38 +160,32 @@ func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
// IamProjectPath returns the path for the project resource.
func IamProjectPath(project string) string {
path, err := iamProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// IamServiceAccountPath returns the path for the service account resource.
func IamServiceAccountPath(project, serviceAccount string) string {
path, err := iamServiceAccountPathTemplate.Render(map[string]string{
"project": project,
"service_account": serviceAccount,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/serviceAccounts/" +
serviceAccount +
""
}
// IamKeyPath returns the path for the key resource.
func IamKeyPath(project, serviceAccount, key string) string {
path, err := iamKeyPathTemplate.Render(map[string]string{
"project": project,
"service_account": serviceAccount,
"key": key,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/serviceAccounts/" +
serviceAccount +
"/keys/" +
key +
""
}
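The three helpers above now build resource names by plain concatenation instead of rendering a path template; the output is unchanged. An illustrative, test-style check in the same package (assumes a *testing.T named t; the values are placeholders):

if got, want := IamKeyPath("my-project", "sa@my-project.iam.gserviceaccount.com", "key-1"),
	"projects/my-project/serviceAccounts/sa@my-project.iam.gserviceaccount.com/keys/key-1"; got != want {
	t.Errorf("IamKeyPath: got %q, want %q", got, want)
}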
// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project.
@ -271,8 +259,8 @@ func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.Creat
// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
//
// Currently, only the following fields are updatable:
// `display_name` .
// The `etag` is mandatory.
// display_name .
// The etag is mandatory.
func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...)

View File

@ -16,9 +16,10 @@ package iam
import (
"fmt"
"reflect"
"sort"
"testing"
"cloud.google.com/go/internal/testutil"
)
func TestPolicy(t *testing.T) {
@ -65,7 +66,7 @@ func TestPolicy(t *testing.T) {
if msg, ok := checkMembers(p, Owner, nil); !ok {
t.Fatal(msg)
}
if got, want := p.Roles(), []RoleName(nil); !reflect.DeepEqual(got, want) {
if got, want := p.Roles(), []RoleName(nil); !testutil.Equal(got, want) {
t.Fatalf("roles: got %v, want %v", got, want)
}
}
@ -74,7 +75,7 @@ func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool)
gotMembers := p.Members(role)
sort.Strings(gotMembers)
sort.Strings(wantMembers)
if !reflect.DeepEqual(gotMembers, wantMembers) {
if !testutil.Equal(gotMembers, wantMembers) {
return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false
}
for _, m := range wantMembers {

View File

@ -22,6 +22,10 @@ import (
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
)
type embed1 struct {
@ -142,7 +146,8 @@ func TestAgainstJSONEncodingNoTags(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, s1)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want,
cmp.AllowUnexported(S1{}, embed1{}, embed2{}, embed3{}, embed4{}, embed5{})) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -166,7 +171,7 @@ func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, myt)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -269,7 +274,7 @@ func TestAgainstJSONEncodingWithTags(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, s2)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want, cmp.AllowUnexported(S2{})) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -410,7 +415,7 @@ func compareFields(got []Field, want []*Field) (msg string, ok bool) {
}
// Need this because Field contains a function, which cannot be compared even
// by reflect.DeepEqual.
// by testutil.Equal.
func fieldsEqual(f1, f2 *Field) bool {
if f1 == nil || f2 == nil {
return f1 == f2
@ -418,7 +423,7 @@ func fieldsEqual(f1, f2 *Field) bool {
return f1.Name == f2.Name &&
f1.NameFromTag == f2.NameFromTag &&
f1.Type == f2.Type &&
reflect.DeepEqual(f1.ParsedTag, f2.ParsedTag)
testutil.Equal(f1.ParsedTag, f2.ParsedTag)
}
// Set the fields of dst from those of src.

View File

@ -38,7 +38,7 @@ go get -v ./...
# cd $GOCLOUD_HOME
# Run tests and tee output to log file, to be pushed to GCS as artifact.
go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log
go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_CHANGE_NUMBER.txt
# Make sure README.md is up to date.
make -C internal/readme test diff

View File

@ -20,6 +20,7 @@ package optional
import (
"fmt"
"strings"
"time"
)
type (
@ -37,6 +38,9 @@ type (
// Float64 is either a float64 or nil.
Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
)
// ToBool returns its argument as a bool.
@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
return x
}
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
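optional is an internal package, so the new Duration support is only reachable from within cloud.google.com/go. A minimal in-package sketch of the intended use (assumes fmt and time are imported; the value is a placeholder):

var timeout Duration = 10 * time.Second // an interface{} holding a time.Duration
fmt.Println(ToDuration(timeout))        // prints 10s; panics if timeout is nil or not a time.Duration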

View File

@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
"log"
"os"
"sync"
@ -227,8 +228,9 @@ type Replayer struct {
initial []byte // initial state
log func(format string, v ...interface{}) // for debugging
mu sync.Mutex
calls []*call
mu sync.Mutex
calls []*call
streams []*stream
}
// A call represents a unary RPC, with a request and response (or error).
@ -238,6 +240,16 @@ type call struct {
response message
}
// A stream represents a gRPC stream, with an initial create-stream call, followed by
// zero or more sends and/or receives.
type stream struct {
method string
createIndex int
createErr error // error from create call
sends []message
recvs []message
}
// NewReplayer creates a Replayer that reads from filename.
func NewReplayer(filename string) (*Replayer, error) {
f, err := os.Open(filename)
@ -271,6 +283,7 @@ func (rep *Replayer) read(r io.Reader) error {
rep.initial = bytes
callsByIndex := map[int]*call{}
streamsByIndex := map[int]*stream{}
for i := 1; ; i++ {
e, err := readEntry(r)
if err != nil {
@ -295,6 +308,26 @@ func (rep *Replayer) read(r io.Reader) error {
call.response = e.msg
rep.calls = append(rep.calls, call)
case pb.Entry_CREATE_STREAM:
s := &stream{method: e.method, createIndex: i}
s.createErr = e.msg.err
streamsByIndex[i] = s
rep.streams = append(rep.streams, s)
case pb.Entry_SEND:
s := streamsByIndex[e.refIndex]
if s == nil {
return fmt.Errorf("replayer: no stream for send #%d", i)
}
s.sends = append(s.sends, e.msg)
case pb.Entry_RECV:
s := streamsByIndex[e.refIndex]
if s == nil {
return fmt.Errorf("replayer: no stream for recv #%d", i)
}
s.recvs = append(s.recvs, e.msg)
default:
return fmt.Errorf("replayer: unknown kind %s", e.kind)
}
@ -314,6 +347,7 @@ func (r *Replayer) DialOptions() []grpc.DialOption {
// fixes that.
grpc.WithBlock(),
grpc.WithUnaryInterceptor(r.interceptUnary),
grpc.WithStreamInterceptor(r.interceptStream),
}
}
@ -346,6 +380,64 @@ func (r *Replayer) interceptUnary(_ context.Context, method string, req, res int
return nil
}
func (r *Replayer) interceptStream(ctx context.Context, _ *grpc.StreamDesc, _ *grpc.ClientConn, method string, _ grpc.Streamer, _ ...grpc.CallOption) (grpc.ClientStream, error) {
r.log("create-stream %s", method)
str := r.extractStream(method)
if str == nil {
return nil, fmt.Errorf("replayer: stream not found for method %s", method)
}
if str.createErr != nil {
return nil, str.createErr
}
return &repClientStream{ctx: ctx, str: str}, nil
}
type repClientStream struct {
ctx context.Context
str *stream
}
func (rcs *repClientStream) Context() context.Context { return rcs.ctx }
func (rcs *repClientStream) SendMsg(m interface{}) error {
if len(rcs.str.sends) == 0 {
return fmt.Errorf("replayer: no more sends for stream %s, created at index %d",
rcs.str.method, rcs.str.createIndex)
}
// TODO(jba): Do not assume that the sends happen in the same order on replay.
msg := rcs.str.sends[0]
rcs.str.sends = rcs.str.sends[1:]
return msg.err
}
func (rcs *repClientStream) RecvMsg(m interface{}) error {
if len(rcs.str.recvs) == 0 {
return fmt.Errorf("replayer: no more receives for stream %s, created at index %d",
rcs.str.method, rcs.str.createIndex)
}
msg := rcs.str.recvs[0]
rcs.str.recvs = rcs.str.recvs[1:]
if msg.err != nil {
return msg.err
}
proto.Merge(m.(proto.Message), msg.msg) // copy msg into m
return nil
}
func (rcs *repClientStream) Header() (metadata.MD, error) {
log.Printf("replay: stream metadata not supported")
return nil, nil
}
func (rcs *repClientStream) Trailer() metadata.MD {
log.Printf("replay: stream metadata not supported")
return nil
}
func (rcs *repClientStream) CloseSend() error {
return nil
}
// extractCall finds the first call in the list with the same method
// and request. It returns nil if it can't find such a call.
func (r *Replayer) extractCall(method string, req proto.Message) *call {
@ -363,6 +455,21 @@ func (r *Replayer) extractCall(method string, req proto.Message) *call {
return nil
}
func (r *Replayer) extractStream(method string) *stream {
r.mu.Lock()
defer r.mu.Unlock()
for i, stream := range r.streams {
if stream == nil {
continue
}
if method == stream.method {
r.streams[i] = nil
return stream
}
}
return nil
}
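With the stream support above, replaying a recorded session still follows the same pattern as for unary calls. A hedged sketch (the file name and server address are placeholders; compare TestReplay below, which does the same with an in-memory buffer):

rep, err := NewReplayer("service.replay")
if err != nil {
	log.Fatal(err)
}
conn, err := grpc.Dial(serverAddr, rep.DialOptions()...)
if err != nil {
	log.Fatal(err)
}
defer conn.Close()
// Unary calls and streams made on conn are now answered from the recorded session.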
// Fprint reads the entries from filename and writes them to w in human-readable form.
// It is intended for debugging.
func Fprint(w io.Writer, filename string) error {

View File

@ -17,11 +17,11 @@ package rpcreplay
import (
"bytes"
"io"
"reflect"
"testing"
ipb "cloud.google.com/go/internal/rpcreplay/proto/intstore"
rpb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
@ -54,7 +54,7 @@ func TestHeaderIO(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %v, want %v", got, want)
}
@ -112,7 +112,7 @@ func TestRecord(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(gotIstate, initialState) {
if !testutil.Equal(gotIstate, initialState) {
t.Fatalf("got %v, want %v", gotIstate, initialState)
}
item := &ipb.Item{Name: "a", Value: 1}
@ -239,21 +239,21 @@ func TestRecord(t *testing.T) {
}
}
// func TestReplay(t *testing.T) {
// srv := newIntStoreServer()
// defer srv.stop()
func TestReplay(t *testing.T) {
srv := newIntStoreServer()
defer srv.stop()
// buf := record(t, srv)
// rep, err := NewReplayerReader(buf)
// if err != nil {
// t.Fatal(err)
// }
// if got, want := rep.Initial(), initialState; !reflect.DeepEqual(got, want) {
// t.Fatalf("got %v, want %v", got, want)
// }
// // Replay the test.
// testService(t, srv.Addr, rep.DialOptions())
// }
buf := record(t, srv)
rep, err := NewReplayerReader(buf)
if err != nil {
t.Fatal(err)
}
if got, want := rep.Initial(), initialState; !testutil.Equal(got, want) {
t.Fatalf("got %v, want %v", got, want)
}
// Replay the test.
testService(t, srv.Addr, rep.DialOptions())
}
func record(t *testing.T, srv *intStoreServer) *bytes.Buffer {
buf := &bytes.Buffer{}


vendor/cloud.google.com/go/internal/testutil/cmp.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"fmt"
"math"
"reflect"
"unicode"
"unicode/utf8"
"github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
)
var (
alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true })
defaultCmpOptions = []cmp.Option{
// Use proto.Equal for protobufs
cmp.Comparer(proto.Equal),
// NaNs compare equal
cmp.FilterValues(func(x, y float64) bool {
return math.IsNaN(x) && math.IsNaN(y)
}, alwaysEqual),
cmp.FilterValues(func(x, y float32) bool {
return math.IsNaN(float64(x)) && math.IsNaN(float64(y))
}, alwaysEqual),
}
)
// Equal tests two values for equality.
func Equal(x, y interface{}, opts ...cmp.Option) bool {
// Put default options at the end. Order doesn't matter.
opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...)
return cmp.Equal(x, y, opts...)
}
// Diff reports the differences between two values.
// Diff(x, y) == "" iff Equal(x, y).
func Diff(x, y interface{}, opts ...cmp.Option) string {
// Put default options at the end. Order doesn't matter.
opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...)
return cmp.Diff(x, y, opts...)
}
// TODO(jba): remove the code below when cmpopts becomes available.
// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
ux := newUnexportedFilter(typs...)
return cmp.FilterPath(ux.filter, cmp.Ignore())
}
type unexportedFilter struct{ m map[reflect.Type]bool }
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
ux.m[t] = true
}
return ux
}
func (xf unexportedFilter) filter(p cmp.Path) bool {
if len(p) < 2 {
return false
}
sf, ok := p[len(p)-1].(cmp.StructField)
if !ok {
return false
}
return xf.m[p[len(p)-2].Type()] && !isExported(sf.Name())
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
r, _ := utf8.DecodeRuneInString(id)
return unicode.IsUpper(r)
}
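Equal and Diff are what the rest of this commit substitutes for reflect.DeepEqual. A test-style sketch of the extra semantics they provide (assumes math and testing are imported; not part of the diff):

func TestEqualSketch(t *testing.T) {
	got := map[string]float64{"x": math.NaN()}
	want := map[string]float64{"x": math.NaN()}
	if !Equal(got, want) { // NaNs compare equal under the default options
		t.Errorf("unexpected diff:\n%s", Diff(got, want))
	}
}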

View File

@ -15,8 +15,9 @@
package tracecontext
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
)
var validData = []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1}
@ -65,7 +66,7 @@ func TestDecode(t *testing.T) {
}
for _, tt := range tests {
gotTraceID, gotSpanID, gotOpts, gotOk := Decode(tt.data)
if !reflect.DeepEqual(gotTraceID, tt.wantTraceID) {
if !testutil.Equal(gotTraceID, tt.wantTraceID) {
t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID)
}
if gotSpanID != tt.wantSpanID {
@ -114,7 +115,7 @@ func TestEncode(t *testing.T) {
if gotN != tt.wantN {
t.Errorf("%s: n = %v, want %v", tt.name, gotN, tt.wantN)
}
if gotData := tt.dst; !reflect.DeepEqual(gotData, tt.wantData) {
if gotData := tt.dst; !testutil.Equal(gotData, tt.wantData) {
t.Errorf("%s: dst = %v, want %v", tt.name, gotData, tt.wantData)
}
}

View File

@ -26,7 +26,7 @@ import (
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170621"
const Repo = "20170928"
// Go returns the Go runtime version. The returned string
// has no whitespace.

View File

@ -0,0 +1,71 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package language
import (
languagepb "google.golang.org/genproto/googleapis/cloud/language/v1"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestLanguageServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var content string = "Hello, world!"
var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT
var document = &languagepb.Document{
Source: &languagepb.Document_Content{
Content: content,
},
Type: type_,
}
var request = &languagepb.AnalyzeSentimentRequest{
Document: document,
}
if _, err := c.AnalyzeSentiment(ctx, request); err != nil {
t.Error(err)
}
}

View File

@ -14,7 +14,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package language is an experimental, auto-generated package for the
// Package language is an auto-generated package for the
// Google Cloud Natural Language API.
//
// Google Cloud Natural Language API provides natural language understanding
@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -31,10 +31,11 @@ import (
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
AnalyzeSentiment []gax.CallOption
AnalyzeEntities []gax.CallOption
AnalyzeSyntax []gax.CallOption
AnnotateText []gax.CallOption
AnalyzeSentiment []gax.CallOption
AnalyzeEntities []gax.CallOption
AnalyzeEntitySentiment []gax.CallOption
AnalyzeSyntax []gax.CallOption
AnnotateText []gax.CallOption
}
func defaultClientOptions() []option.ClientOption {
@ -60,10 +61,11 @@ func defaultCallOptions() *CallOptions {
},
}
return &CallOptions{
AnalyzeSentiment: retry[[2]string{"default", "idempotent"}],
AnalyzeEntities: retry[[2]string{"default", "idempotent"}],
AnalyzeSyntax: retry[[2]string{"default", "idempotent"}],
AnnotateText: retry[[2]string{"default", "idempotent"}],
AnalyzeSentiment: retry[[2]string{"default", "idempotent"}],
AnalyzeEntities: retry[[2]string{"default", "idempotent"}],
AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}],
AnalyzeSyntax: retry[[2]string{"default", "idempotent"}],
AnnotateText: retry[[2]string{"default", "idempotent"}],
}
}
@ -97,7 +99,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
client: languagepb.NewLanguageServiceClient(conn),
}
c.SetGoogleClientInfo()
c.setGoogleClientInfo()
return c, nil
}
@ -112,10 +114,10 @@ func (c *Client) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) {
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -155,6 +157,23 @@ func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEnt
return resp, nil
}
// AnalyzeEntitySentiment finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
// sentiment associated with each entity and its mentions.
func (c *Client) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest, opts ...gax.CallOption) (*languagepb.AnalyzeEntitySentimentResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.AnalyzeEntitySentiment[0:len(c.CallOptions.AnalyzeEntitySentiment):len(c.CallOptions.AnalyzeEntitySentiment)], opts...)
var resp *languagepb.AnalyzeEntitySentimentResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.AnalyzeEntitySentiment(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and
// tokenization along with part of speech tags, dependency trees, and other
// properties.

View File

@ -68,6 +68,24 @@ func ExampleClient_AnalyzeEntities() {
_ = resp
}
func ExampleClient_AnalyzeEntitySentiment() {
ctx := context.Background()
c, err := language.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &languagepb.AnalyzeEntitySentimentRequest{
// TODO: Fill request struct fields.
}
resp, err := c.AnalyzeEntitySentiment(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_AnalyzeSyntax() {
ctx := context.Background()
c, err := language.NewClient(ctx)

View File

@ -84,6 +84,18 @@ func (s *mockLanguageServer) AnalyzeEntities(ctx context.Context, req *languagep
return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil
}
func (s *mockLanguageServer) AnalyzeEntitySentiment(ctx context.Context, req *languagepb.AnalyzeEntitySentimentRequest) (*languagepb.AnalyzeEntitySentimentResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*languagepb.AnalyzeEntitySentimentResponse), nil
}
func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@ -208,10 +220,8 @@ func TestLanguageServiceAnalyzeEntities(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -239,10 +249,8 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -259,6 +267,65 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
}
_ = resp
}
func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) {
var language string = "language-1613589672"
var expectedResponse = &languagepb.AnalyzeEntitySentimentResponse{
Language: language,
}
mockLanguage.err = nil
mockLanguage.reqs = nil
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.AnalyzeEntitySentiment(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) {
errCode := codes.PermissionDenied
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.AnalyzeEntitySentiment(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
var language string = "language-1613589672"
var expectedResponse = &languagepb.AnalyzeSyntaxResponse{
@ -271,10 +338,8 @@ func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -302,10 +367,8 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -335,11 +398,9 @@ func TestLanguageServiceAnnotateText(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
EncodingType: encodingType,
Document: document,
Features: features,
}
c, err := NewClient(context.Background(), clientOpt)
@ -368,11 +429,9 @@ func TestLanguageServiceAnnotateTextError(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
EncodingType: encodingType,
Document: document,
Features: features,
}
c, err := NewClient(context.Background(), clientOpt)

View File

@ -0,0 +1,71 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package language
import (
languagepb "google.golang.org/genproto/googleapis/cloud/language/v1beta2"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
func TestLanguageServiceSmoke(t *testing.T) {
if testing.Short() {
t.Skip("skipping smoke test in short mode")
}
ctx := context.Background()
ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
if ts == nil {
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
}
projectId := testutil.ProjID()
_ = projectId
c, err := NewClient(ctx, option.WithTokenSource(ts))
if err != nil {
t.Fatal(err)
}
var content string = "Hello, world!"
var type_ languagepb.Document_Type = languagepb.Document_PLAIN_TEXT
var document = &languagepb.Document{
Source: &languagepb.Document_Content{
Content: content,
},
Type: type_,
}
var request = &languagepb.AnalyzeSentimentRequest{
Document: document,
}
if _, err := c.AnalyzeSentiment(ctx, request); err != nil {
t.Error(err)
}
}

View File

@ -34,8 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -35,6 +35,7 @@ type CallOptions struct {
AnalyzeEntities []gax.CallOption
AnalyzeEntitySentiment []gax.CallOption
AnalyzeSyntax []gax.CallOption
ClassifyText []gax.CallOption
AnnotateText []gax.CallOption
}
@ -65,6 +66,7 @@ func defaultCallOptions() *CallOptions {
AnalyzeEntities: retry[[2]string{"default", "idempotent"}],
AnalyzeEntitySentiment: retry[[2]string{"default", "idempotent"}],
AnalyzeSyntax: retry[[2]string{"default", "idempotent"}],
ClassifyText: retry[[2]string{"default", "idempotent"}],
AnnotateText: retry[[2]string{"default", "idempotent"}],
}
}
@ -99,7 +101,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
client: languagepb.NewLanguageServiceClient(conn),
}
c.SetGoogleClientInfo()
c.setGoogleClientInfo()
return c, nil
}
@ -114,10 +116,10 @@ func (c *Client) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) SetGoogleClientInfo(keyval ...string) {
func (c *Client) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -192,8 +194,24 @@ func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSynta
return resp, nil
}
// AnnotateText is a convenience method that provides all syntax, sentiment, and entity
// features in one call.
// ClassifyText classifies a document into categories.
func (c *Client) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest, opts ...gax.CallOption) (*languagepb.ClassifyTextResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ClassifyText[0:len(c.CallOptions.ClassifyText):len(c.CallOptions.ClassifyText)], opts...)
var resp *languagepb.ClassifyTextResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.ClassifyText(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
// AnnotateText is a convenience method that provides all syntax, sentiment, entity, and
// classification features in one call.
func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest, opts ...gax.CallOption) (*languagepb.AnnotateTextResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.AnnotateText[0:len(c.CallOptions.AnnotateText):len(c.CallOptions.AnnotateText)], opts...)

View File

@ -104,6 +104,24 @@ func ExampleClient_AnalyzeSyntax() {
_ = resp
}
func ExampleClient_ClassifyText() {
ctx := context.Background()
c, err := language.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &languagepb.ClassifyTextRequest{
// TODO: Fill request struct fields.
}
resp, err := c.ClassifyText(ctx, req)
if err != nil {
// TODO: Handle error.
}
// TODO: Use resp.
_ = resp
}
func ExampleClient_AnnotateText() {
ctx := context.Background()
c, err := language.NewClient(ctx)

View File

@ -108,6 +108,18 @@ func (s *mockLanguageServer) AnalyzeSyntax(ctx context.Context, req *languagepb.
return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil
}
func (s *mockLanguageServer) ClassifyText(ctx context.Context, req *languagepb.ClassifyTextRequest) (*languagepb.ClassifyTextResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
}
s.reqs = append(s.reqs, req)
if s.err != nil {
return nil, s.err
}
return s.resps[0].(*languagepb.ClassifyTextResponse), nil
}
func (s *mockLanguageServer) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) {
md, _ := metadata.FromIncomingContext(ctx)
if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
@ -220,10 +232,8 @@ func TestLanguageServiceAnalyzeEntities(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -251,10 +261,8 @@ func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitiesRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -283,10 +291,8 @@ func TestLanguageServiceAnalyzeEntitySentiment(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -314,10 +320,8 @@ func TestLanguageServiceAnalyzeEntitySentimentError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeEntitySentimentRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -346,10 +350,8 @@ func TestLanguageServiceAnalyzeSyntax(t *testing.T) {
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -377,10 +379,8 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnalyzeSyntaxRequest{
Document: document,
EncodingType: encodingType,
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
@ -397,6 +397,62 @@ func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) {
}
_ = resp
}
func TestLanguageServiceClassifyText(t *testing.T) {
var expectedResponse *languagepb.ClassifyTextResponse = &languagepb.ClassifyTextResponse{}
mockLanguage.err = nil
mockLanguage.reqs = nil
mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse)
var document *languagepb.Document = &languagepb.Document{}
var request = &languagepb.ClassifyTextRequest{
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ClassifyText(context.Background(), request)
if err != nil {
t.Fatal(err)
}
if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) {
t.Errorf("wrong request %q, want %q", got, want)
}
if want, got := expectedResponse, resp; !proto.Equal(want, got) {
t.Errorf("wrong response %q, want %q)", got, want)
}
}
func TestLanguageServiceClassifyTextError(t *testing.T) {
errCode := codes.PermissionDenied
mockLanguage.err = gstatus.Error(errCode, "test error")
var document *languagepb.Document = &languagepb.Document{}
var request = &languagepb.ClassifyTextRequest{
Document: document,
}
c, err := NewClient(context.Background(), clientOpt)
if err != nil {
t.Fatal(err)
}
resp, err := c.ClassifyText(context.Background(), request)
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
}
func TestLanguageServiceAnnotateText(t *testing.T) {
var language string = "language-1613589672"
var expectedResponse = &languagepb.AnnotateTextResponse{
@ -410,11 +466,9 @@ func TestLanguageServiceAnnotateText(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
EncodingType: encodingType,
Document: document,
Features: features,
}
c, err := NewClient(context.Background(), clientOpt)
@ -443,11 +497,9 @@ func TestLanguageServiceAnnotateTextError(t *testing.T) {
var document *languagepb.Document = &languagepb.Document{}
var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{}
var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE
var request = &languagepb.AnnotateTextRequest{
Document: document,
Features: features,
EncodingType: encodingType,
Document: document,
Features: features,
}
c, err := NewClient(context.Background(), clientOpt)

View File

@ -31,11 +31,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
configProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}")
)
// ConfigCallOptions contains the retry settings for each method of ConfigClient.
type ConfigCallOptions struct {
ListSinks []gax.CallOption
@ -133,25 +128,20 @@ func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) {
// ConfigProjectPath returns the path for the project resource.
func ConfigProjectPath(project string) string {
path, err := configProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// ConfigSinkPath returns the path for the sink resource.
func ConfigSinkPath(project, sink string) string {
path, err := configSinkPathTemplate.Render(map[string]string{
"project": project,
"sink": sink,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/sinks/" +
sink +
""
}
// ListSinks lists sinks.
@ -208,7 +198,7 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques
// CreateSink creates a sink that exports specified log entries to a destination. The
// export of newly-ingested log entries begins immediately, unless the current
// time is outside the sink's start and end times or the sink's
// `writer_identity` is not permitted to write to the destination. A sink can
// writer_identity is not permitted to write to the destination. A sink can
// export log entries only from the resource owning the sink.
func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
@ -227,12 +217,12 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink
// UpdateSink updates a sink. If the named sink doesn't exist, then this method is
// identical to
// [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create).
// sinks.create (at /logging/docs/api/reference/rest/v2/projects.sinks/create).
// If the named sink does exist, then this method replaces the following
// fields in the existing sink with values from the new sink: `destination`,
// `filter`, `output_version_format`, `start_time`, and `end_time`.
// The updated filter might also have a new `writer_identity`; see the
// `unique_writer_identity` field.
// fields in the existing sink with values from the new sink: destination,
// filter, output_version_format, start_time, and end_time.
// The updated filter might also have a new writer_identity; see the
// unique_writer_identity field.
func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...)
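A hedged sketch of calling UpdateSink as documented above (client construction and error handling elided; the project, sink name, and bucket are placeholders, and the request fields are assumed from the google.logging.v2 protos rather than taken from this diff):

sink, err := configClient.UpdateSink(ctx, &loggingpb.UpdateSinkRequest{
	SinkName: ConfigSinkPath("my-project", "my-sink"),
	Sink: &loggingpb.LogSink{
		Name:        "my-sink",
		Destination: "storage.googleapis.com/my-bucket",
	},
})
if err != nil {
	// handle error
}
_ = sink // the returned LogSink reflects the replaced destination/filter fields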
@ -248,7 +238,7 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink
return resp, nil
}
// DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that
// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that
// service account is also deleted.
func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)

View File

@ -35,8 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -32,11 +32,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
loggingProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}")
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
DeleteLog []gax.CallOption
@ -146,25 +141,20 @@ func (c *Client) SetGoogleClientInfo(keyval ...string) {
// ProjectPath returns the path for the project resource.
func ProjectPath(project string) string {
path, err := loggingProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// LogPath returns the path for the log resource.
func LogPath(project, log string) string {
path, err := loggingLogPathTemplate.Render(map[string]string{
"project": project,
"log": log,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/logs/" +
log +
""
}
// DeleteLog deletes all the log entries in a log.
@ -200,7 +190,7 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt
// ListLogEntries lists log entries. Use this method to retrieve log entries from
// Stackdriver Logging. For ways to export log entries, see
// [Exporting Logs](/logging/docs/export).
// Exporting Logs (at /logging/docs/export).
func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...)

View File

@ -31,11 +31,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
metricsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}")
)
// MetricsCallOptions contains the retry settings for each method of MetricsClient.
type MetricsCallOptions struct {
ListLogMetrics []gax.CallOption
@ -132,25 +127,20 @@ func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) {
// MetricsProjectPath returns the path for the project resource.
func MetricsProjectPath(project string) string {
path, err := metricsProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// MetricsMetricPath returns the path for the metric resource.
func MetricsMetricPath(project, metric string) string {
path, err := metricsMetricPathTemplate.Render(map[string]string{
"project": project,
"metric": metric,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/metrics/" +
metric +
""
}
// ListLogMetrics lists logs-based metrics.

View File

@ -46,7 +46,9 @@ import (
logtypepb "google.golang.org/genproto/googleapis/logging/type"
logpb "google.golang.org/genproto/googleapis/logging/v2"
"google.golang.org/grpc/codes"
// Import the following so EntryIterator can unmarshal log protos.
_ "google.golang.org/genproto/googleapis/appengine/logging/v1"
_ "google.golang.org/genproto/googleapis/cloud/audit"
)

View File

@ -22,7 +22,6 @@ import (
"net/http"
"net/url"
"os"
"reflect"
"testing"
"time"
@ -184,16 +183,8 @@ func TestFromLogEntry(t *testing.T) {
if err != nil {
t.Fatal(err)
}
// Test sub-values separately because %+v and %#v do not follow pointers.
// TODO(jba): use a differ or pretty-printer.
if !reflect.DeepEqual(got.HTTPRequest.Request, want.HTTPRequest.Request) {
t.Fatalf("HTTPRequest.Request:\ngot %+v\nwant %+v", got.HTTPRequest.Request, want.HTTPRequest.Request)
}
if !reflect.DeepEqual(got.HTTPRequest, want.HTTPRequest) {
t.Fatalf("HTTPRequest:\ngot %+v\nwant %+v", got.HTTPRequest, want.HTTPRequest)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("FullEntry:\ngot %+v\nwant %+v", got, want)
if diff := testutil.Diff(got, want, testutil.IgnoreUnexported(http.Request{})); diff != "" {
t.Errorf("FullEntry:\n%s", diff)
}
// Proto payload.

View File

@ -16,7 +16,6 @@ package logadmin
import (
"log"
"reflect"
"testing"
"time"
@ -64,7 +63,7 @@ func TestCreateDeleteMetric(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if want := metric; !reflect.DeepEqual(got, want) {
if want := metric; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -94,7 +93,7 @@ func TestUpdateMetric(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if want := metric; !reflect.DeepEqual(got, want) {
if want := metric; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -107,7 +106,7 @@ func TestUpdateMetric(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if want := metric; !reflect.DeepEqual(got, want) {
if want := metric; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}
@ -149,7 +148,7 @@ func TestListMetrics(t *testing.T) {
got[m.ID] = m
}
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}

View File

@ -87,8 +87,7 @@ func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) {
return fromLogSink(ls), nil
}
// UpdateSink updates an existing Sink, or creates a new one if the Sink doesn't exist.
// Requires AdminScope.
// UpdateSink updates an existing Sink. Requires AdminScope.
func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) {
ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{
SinkName: c.sinkPath(sink.ID),

View File

@ -20,7 +20,6 @@ package logadmin
import (
"log"
"reflect"
"testing"
"time"
@ -125,14 +124,14 @@ func TestCreateDeleteSink(t *testing.T) {
t.Fatal(err)
}
defer client.DeleteSink(ctx, sink.ID)
if want := sink; !reflect.DeepEqual(got, want) {
if want := sink; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
got, err = client.Sink(ctx, sink.ID)
if err != nil {
t.Fatal(err)
}
if want := sink; !reflect.DeepEqual(got, want) {
if want := sink; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -153,20 +152,22 @@ func TestUpdateSink(t *testing.T) {
Filter: testFilter,
}
// Updating a non-existent sink creates a new one.
if _, err := client.CreateSink(ctx, sink); err != nil {
t.Fatal(err)
}
got, err := client.UpdateSink(ctx, sink)
if err != nil {
t.Fatal(err)
}
defer client.DeleteSink(ctx, sink.ID)
if want := sink; !reflect.DeepEqual(got, want) {
if want := sink; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
got, err = client.Sink(ctx, sink.ID)
if err != nil {
t.Fatal(err)
}
if want := sink; !reflect.DeepEqual(got, want) {
if want := sink; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -179,7 +180,7 @@ func TestUpdateSink(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if want := sink; !reflect.DeepEqual(got, want) {
if want := sink; !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}
@ -220,7 +221,7 @@ func TestListSinks(t *testing.T) {
got[s.ID] = s
}
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}

View File

@ -86,7 +86,11 @@ var now = time.Now
// ErrOverflow signals that the number of buffered entries for a Logger
// exceeds its BufferLimit.
var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")
var ErrOverflow = bundler.ErrOverflow
// ErrOversizedEntry signals that an entry's size exceeds the maximum number of
// bytes that will be sent in a single call to the logging service.
var ErrOversizedEntry = bundler.ErrOversizedItem
// Client is a Logging client. A Client is associated with a single Cloud project.
type Client struct {
@ -331,10 +335,10 @@ type entryByteThreshold int
func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) }
// EntryByteLimit is the maximum number of bytes of entries that will be sent
// in a single call to the logging service. This option limits the size of a
// single RPC payload, to account for network or service issues with large
// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has
// no effect.
// in a single call to the logging service. ErrOversizedEntry is returned if an
// entry exceeds EntryByteLimit. This option limits the size of a single RPC
// payload, to account for network or service issues with large RPCs. If
// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect.
// The default is zero, meaning there is no limit.
func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) }
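A hedged sketch of combining these options (client construction elided; the log name and limits are placeholders):

lg := client.Logger("my-log",
	logging.EntryByteThreshold(1<<16), // start a flush once buffered entries reach 64 KiB
	logging.EntryByteLimit(1<<20),     // cap a single WriteLogEntries payload at 1 MiB
)
lg.Log(logging.Entry{Payload: "hello"})
// Errors from background sends, including ErrOverflow and ErrOversizedEntry,
// are reported through the client's OnError function.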

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"log"
"os"
"reflect"
"strings"
"testing"
"time"
@@ -250,7 +249,7 @@ func compareEntry(got, want *logging.Entry) bool {
if !ltesting.PayloadEqual(got.Payload, want.Payload) {
return false
}
if !reflect.DeepEqual(got.Labels, want.Labels) {
if !testutil.Equal(got.Labels, want.Labels) {
return false
}

View File

@@ -19,10 +19,11 @@ package logging
import (
"net/http"
"net/url"
"reflect"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
durpb "github.com/golang/protobuf/ptypes/duration"
structpb "github.com/golang/protobuf/ptypes/struct"
@@ -98,7 +99,7 @@ func TestLoggerCreation(t *testing.T) {
if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !test.defaultResource && !proto.Equal(got, want) {
t.Errorf("%v: resource: got %v, want %v", test.options, got, want)
}
if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !reflect.DeepEqual(got, want) {
if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !testutil.Equal(got, want) {
t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want)
}
if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want {
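
Note that the hunk above keeps proto.Equal for the protobuf-typed commonResource while switching the plain Go map commonLabels to testutil.Equal. Purely as an illustration of that split (not part of this diff), protobuf messages should still be compared with proto.Equal:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a := &durpb.Duration{Seconds: 30}
	b := &durpb.Duration{Seconds: 30}
	// proto.Equal applies protobuf semantics (default values, unknown fields),
	// which generic deep-equality helpers do not guarantee for messages.
	fmt.Println(proto.Equal(a, b)) // true
}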

View File

@@ -33,8 +33,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the authentication scopes required
// by this package.
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{}
}
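
As a hedged illustration of how a generated client typically consumes DefaultAuthScopes — this wiring is assumed rather than shown in the diff, and the endpoint string and package name are placeholders:

package longrunning // assumed to sit alongside the generated client

import "google.golang.org/api/option"

func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("longrunning.googleapis.com:443"), // placeholder endpoint
		option.WithScopes(DefaultAuthScopes()...),             // empty here, so effectively a no-op
	}
}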

View File

@@ -93,7 +93,7 @@ type OperationsClient struct {
// interface to receive the real response asynchronously by polling the
// operation resource, or pass the operation resource to another API (such as
// Google Cloud Pub/Sub API) to receive the response. Any API service that
// returns long-running operations should implement the `Operations` interface
// returns long-running operations should implement the Operations interface
// so developers can have a consistent client experience.
func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultOperationsClientOptions(), opts...)...)
@@ -149,10 +149,10 @@ func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.
}
// ListOperations lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns `UNIMPLEMENTED`.
// server doesn't support this method, it returns UNIMPLEMENTED.
//
// NOTE: the `name` binding below allows API services to override the binding
// to use different resource name schemes, such as `users/*/operations`.
// NOTE: the name binding below allows API services to override the binding
// to use different resource name schemes, such as users/*/operations.
func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
@@ -190,13 +190,13 @@ func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningp
// CancelOperation starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// google.rpc.Code.UNIMPLEMENTED. Clients can use
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
// corresponding to Code.CANCELLED.
func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
@@ -211,7 +211,7 @@ func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunning
// DeleteOperation deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
// google.rpc.Code.UNIMPLEMENTED.
func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.DeleteOperation[0:len(c.CallOptions.DeleteOperation):len(c.CallOptions.DeleteOperation)], opts...)
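
For context, a short usage sketch of the client documented above; it assumes the generated cloud.google.com/go/longrunning/autogen package and the google.golang.org/genproto longrunning protos, and the operation resource name is a placeholder.

package main

import (
	"context"
	"log"

	longrunning "cloud.google.com/go/longrunning/autogen"
	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
)

func main() {
	ctx := context.Background()
	client, err := longrunning.NewOperationsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}

	op, err := client.GetOperation(ctx, &longrunningpb.GetOperationRequest{
		Name: "operations/some-operation-id", // placeholder resource name
	})
	if err != nil {
		log.Fatal(err) // e.g. UNIMPLEMENTED if the service doesn't expose Operations
	}
	if !op.Done {
		// Best-effort cancellation; poll GetOperation afterwards to confirm.
		if err := client.CancelOperation(ctx, &longrunningpb.CancelOperationRequest{
			Name: "operations/some-operation-id",
		}); err != nil {
			log.Fatal(err)
		}
	}
}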

Some files were not shown because too many files have changed in this diff.