1
mirror of https://github.com/rclone/rclone synced 2025-02-20 16:52:03 +01:00

vendor: update all dependencies

* Update all dependencies
  * Remove all `[[constraint]]` from Gopkg.toml
  * Add in the minimum number of `[[override]]` to build
  * Remove go get of github.com/inconshreveable/mousetrap as it is vendored
  * Update docs with new policy on constraints
This commit is contained in:
Nick Craig-Wood 2018-05-02 17:09:45 +01:00
parent 21383877df
commit 6427029c4e
4902 changed files with 1443417 additions and 227283 deletions
CONTRIBUTING.mdGopkg.lockGopkg.tomlMakefile
vendor/cloud.google.com/go

@ -235,15 +235,15 @@ in the `vendor` directory for perfectly reproducible builds.
The `vendor` directory is entirely managed by the `dep` tool.
To add a new dependency
To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.
dep ensure -add github.com/pkg/errors
You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.
You can add constraints on that package (see the `dep` documentation),
but don't unless you really need to.
Please check in the changes generated by dep including the `vendor`
directory and `Godep.toml` and `Godep.locl` in a single commit
Please check in the changes generated by `dep` including the `vendor`
directory and `Gopkg.toml` and `Gopkg.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.

104
Gopkg.lock generated

@ -9,13 +9,13 @@
"fs",
"fuseutil"
]
revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "20d4028b8a750c2aca76bf9fefa8ed2d0109b573"
version = "v0.19.0"
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
version = "v0.21.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
@ -23,8 +23,8 @@
"storage",
"version"
]
revision = "e67cd39e942c417ae5e9ae1165f778d9fe8996e0"
version = "v14.5.0"
revision = "4650843026a7fdec254a8d9cf893693a254edd0b"
version = "v16.2.1"
[[projects]]
name = "github.com/Azure/go-autorest"
@ -34,8 +34,8 @@
"autorest/azure",
"autorest/date"
]
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"
revision = "eaa7994b2278094c904d31993d26f56324db3052"
version = "v10.8.1"
[[projects]]
branch = "master"
@ -44,16 +44,16 @@
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
[[projects]]
branch = "master"
name = "github.com/VividCortex/ewma"
packages = ["."]
revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
version = "v1.1.1"
[[projects]]
branch = "master"
name = "github.com/a8m/tree"
packages = ["."]
revision = "cf42b1e486f0b025942a768a9ad59c9939d6ca40"
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
[[projects]]
name = "github.com/abbot/go-http-auth"
@ -62,7 +62,6 @@
version = "v0.4.0"
[[projects]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
@ -95,13 +94,14 @@
"service/s3/s3manager",
"service/sts"
]
revision = "12fe7d35f8ad5f7f2715d414624b0723737de1f7"
revision = "4f5d298bd2dcb34b06d944594f458d1f77ac4d66"
version = "v1.13.42"
[[projects]]
name = "github.com/billziss-gh/cgofuse"
packages = ["fuse"]
revision = "487e2baa5611bab252a906d7f9b869f944607305"
version = "v1.0.4"
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
version = "v1.1.0"
[[projects]]
branch = "master"
@ -134,7 +134,6 @@
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
packages = [
"dropbox",
@ -148,19 +147,20 @@
"dropbox/users",
"dropbox/users_common"
]
revision = "f0b3f3ded6d415a94e83e9a514fb8025e4e6be31"
revision = "81ac5b288ffc03b166174f520cdc0b227461450e"
version = "v4.0.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0"
version = "v1.33.0"
revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
version = "v1.36.0"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
@ -178,7 +178,7 @@
branch = "master"
name = "github.com/jlaffaye/ftp"
packages = ["."]
revision = "427467931c6fbc25acba4537c9d3cbc40cfa569b"
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
[[projects]]
name = "github.com/jmespath/go-jmespath"
@ -219,13 +219,13 @@
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
[[projects]]
branch = "master"
name = "github.com/nsf/termbox-go"
packages = ["."]
revision = "e2050e41c8847748ec5288741c0b19a8cb26d084"
revision = "5a49b82160547cc98fca189a677a1c14eff796f8"
[[projects]]
branch = "master"
@ -252,16 +252,16 @@
version = "v0.1.1"
[[projects]]
branch = "master"
name = "github.com/pkg/errors"
packages = ["."]
revision = "816c9085562cd7ee03e7f8188a1cfd942858cded"
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "43ec6c679d353f6e077d3965dc74f6d996eb4a09"
version = "1.5.1"
revision = "5bf2a174b604c6b5549dd9740d924ff2f02e3ad7"
version = "1.6.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
@ -270,10 +270,10 @@
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/rfjakob/eme"
packages = ["."]
revision = "2222dbd4ba467ab3fc7e8af41562fcfe69c0d770"
revision = "01668ae55fe0b79a483095689043cce3e80260db"
version = "v1.1"
[[projects]]
name = "github.com/russross/blackfriday"
@ -291,7 +291,7 @@
branch = "master"
name = "github.com/sevlyar/go-daemon"
packages = ["."]
revision = "32749a731f76154d29bc6a547e6585f320eb235e"
revision = "45a2ba1b7c6710a044163fa109bf08d060bc3afa"
[[projects]]
branch = "master"
@ -300,34 +300,34 @@
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
[[projects]]
branch = "master"
name = "github.com/spf13/cobra"
packages = [
".",
"doc"
]
revision = "c439c4fa093711d42e1b01acb1235b52004753c1"
revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/spf13/pflag"
packages = ["."]
revision = "ee5fd03fd6acfd43e44aea0b4135958546ed8e73"
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "380174f817a09abe5982a82f94ad50938a8df65d"
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
packages = ["."]
revision = "4e68b16e97ffc3b77abacbf727817a4d48fb0b66"
revision = "3ba49835f4db01d6329782cbdc7a0a8bb3a26c5f"
[[projects]]
branch = "master"
@ -336,7 +336,6 @@
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
packages = [
".",
@ -351,7 +350,8 @@
"service",
"utils"
]
revision = "a3cbaaf92247eaf55751a7ff37c126c511757492"
revision = "9e88dc1b83728e1462fd74bb61b0f5e28ac95bb6"
version = "v2.2.12"
[[projects]]
branch = "master"
@ -372,7 +372,7 @@
"ssh/agent",
"ssh/terminal"
]
revision = "c3a3ad6d03f7a915c0f7e194b7152974bb73d287"
revision = "4ec37c66abab2c7e02ae775328b2ff001c3f025a"
[[projects]]
branch = "master"
@ -382,15 +382,17 @@
"context/ctxhttp",
"html",
"html/atom",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"lex/httplex",
"publicsuffix",
"webdav",
"webdav/internal/xml",
"websocket"
]
revision = "92b859f39abd2d91a854c9f9c4621b2f5054a92d"
revision = "640f4622ab692b87c2f3a94265e6f579fe38263d"
[[projects]]
branch = "master"
@ -402,7 +404,7 @@
"jws",
"jwt"
]
revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb"
revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
[[projects]]
branch = "master"
@ -411,18 +413,15 @@
"unix",
"windows"
]
revision = "d8e400bc7db4870d786864138af681469693d18c"
revision = "6f686a352de66814cdd080d970febae7767857a3"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
@ -434,13 +433,14 @@
"unicode/norm",
"unicode/rangetable"
]
revision = "8c34f848e18c4bd34d02db7f19a0ed1a0a8f5852"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
@ -452,7 +452,7 @@
"googleapi/internal/uritemplates",
"storage/v1"
]
revision = "55e9fb4044f4757138d4273ace23060d022d18f9"
revision = "bb395b674c9930450ea7243b3e3c8f43150f4c11"
[[projects]]
name = "google.golang.org/appengine"
@ -475,12 +475,12 @@
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
version = "v2.1.1"
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "09939c0d5f32998497c8304c84dd5c397c88816d235441d87f5306ae5db43b8a"
inputs-digest = "e250c0e18b90fecd81621d7ffcc1580931e668bac9048de910fdf6df8e4a140c"
solver-name = "gps-cdcl"
solver-version = 1

@ -1,154 +1,20 @@
## Gopkg.toml example (these lines may be deleted)
## "required" lists a set of packages (not projects) that must be included in
## Gopkg.lock. This list is merged with the set of packages imported by the current
## project. Use it when your project needs a package it doesn't explicitly import -
## including "main" packages.
# required = ["github.com/user/thing/cmd/thing"]
## "ignored" lists a set of packages (not projects) that are ignored when
## dep statically analyzes source code. Ignored packages can be in this project,
## or in a dependency.
# ignored = ["github.com/user/project/badpkg"]
## Dependencies define constraints on dependent projects. They are respected by
## dep whether coming from the Gopkg.toml of the current project or a dependency.
# [[constraint]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
# github.com/yunify/qingstor-sdk-go depends on an old version of
# github.com/pengsrc/go-shared - pin the version here
#
## Recommended: the version constraint to enforce for the project.
## Only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: an alternate location (URL or import path) for the project's source.
# source = "https://github.com/myfork/package.git"
# When the version here moves on, we can unpin
# https://github.com/yunify/qingstor-sdk-go/blob/master/glide.yaml
[[override]]
version = "=v0.1.1"
name = "github.com/pengsrc/go-shared"
## Overrides have the same structure as [[constraint]], but supercede all
## [[constraint]] declarations from all projects. Only the current project's
## [[override]] are applied.
##
## Overrides are a sledgehammer. Use them only as a last resort.
# [[override]]
## Required: the root import path of the project being constrained.
# name = "github.com/user/project"
#
## Optional: specifying a version constraint override will cause all other
## constraints on this project to be ignored; only the overriden constraint
## need be satisfied.
## Again, only one of "branch", "version" or "revision" can be specified.
# version = "1.0.0"
# branch = "master"
# revision = "abc123"
#
## Optional: specifying an alternate source location as an override will
## enforce that the alternate location is used for that project, regardless of
## what source location any dependent projects specify.
# source = "https://github.com/myfork/package.git"
[[constraint]]
branch = "master"
name = "bazil.org/fuse"
[[constraint]]
branch = "master"
name = "github.com/Unknwon/goconfig"
[[constraint]]
branch = "master"
name = "github.com/VividCortex/ewma"
[[constraint]]
branch = "master"
name = "github.com/aws/aws-sdk-go"
[[constraint]]
branch = "master"
name = "github.com/ncw/go-acd"
[[constraint]]
branch = "master"
name = "github.com/ncw/swift"
[[constraint]]
branch = "master"
name = "github.com/pkg/errors"
[[constraint]]
branch = "master"
name = "github.com/pkg/sftp"
[[constraint]]
branch = "master"
name = "github.com/rfjakob/eme"
[[constraint]]
branch = "master"
name = "github.com/skratchdot/open-golang"
[[constraint]]
branch = "master"
name = "github.com/spf13/cobra"
[[constraint]]
branch = "master"
name = "github.com/spf13/pflag"
[[constraint]]
branch = "master"
name = "github.com/stretchr/testify"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
[[constraint]]
branch = "master"
name = "golang.org/x/sys"
[[constraint]]
branch = "master"
name = "golang.org/x/text"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
branch = "master"
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
[[constraint]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"
[[constraint]]
branch = "master"
name = "github.com/coreos/bbolt"
[[constraint]]
branch = "master"
name = "github.com/patrickmn/go-cache"
[[constraint]]
branch = "master"
name = "github.com/okzk/sdnotify"
[[constraint]]
# pin this to master to pull in the macOS changes
# can likely remove for 1.42
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"
[[constraint]]
# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.42
[[override]]
branch = "master"
name = "github.com/t3rm1n4l/go-mega"
name = "github.com/coreos/bbolt"

@ -62,7 +62,6 @@ ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/golang/lint/golint
go get -u github.com/inconshreveable/mousetrap
go get -u github.com/tools/godep
endif

@ -22,6 +22,7 @@ David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>

48
vendor/cloud.google.com/go/README.md generated vendored

@ -33,6 +33,54 @@ make backwards-incompatible changes.
## News
_April 9, 2018_
*v0.21.0*
- bigquery:
- Add OpenCensus tracing.
- firestore:
- **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot
whose Exists method returns false. DocumentRef.Get and Transaction.Get
return the non-nil DocumentSnapshot in addition to a NotFound error.
**DocumentRef.GetAll and Transaction.GetAll return a non-nil
DocumentSnapshot instead of nil.**
- Add DocumentIterator.Stop. **Call Stop whenever you are done with a
DocumentIterator.**
- Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime
notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen.
- Canceling an RPC now always returns a grpc.Status with codes.Canceled.
- spanner:
- Add `CommitTimestamp`, which supports inserting the commit timestamp of a
transaction into a column.
_March 22, 2018_
*v0.20.0*
- bigquery: Support SchemaUpdateOptions for load jobs.
- bigtable:
- Add SampleRowKeys.
- cbt: Support union, intersection GCPolicy.
- Retry admin RPCS.
- Add trace spans to retries.
- datastore: Add OpenCensus tracing.
- firestore:
- Fix queries involving Null and NaN.
- Allow Timestamp protobuffers for time values.
- logging: Add a WriteTimeout option.
- spanner: Support Batch API.
- storage: Add OpenCensus tracing.
_February 26, 2018_
*v0.19.0*

@ -47,6 +47,11 @@ func setClientHeader(headers http.Header) {
// Client may be used to perform BigQuery operations.
type Client struct {
// Location, if set, will be used as the default location for all subsequent
// dataset creation and job operations. A location specified directly in one of
// those operations will override this value.
Location string
projectID string
bqs *bq.Service
}

@ -100,7 +100,7 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) {
func (c *Copier) newJob() *bq.Job {
return &bq.Job{
JobReference: c.JobIDConfig.createJobRef(c.c.projectID),
JobReference: c.JobIDConfig.createJobRef(c.c),
Configuration: c.CopyConfig.toBQ(),
}
}

@ -49,11 +49,12 @@ func defaultCopyJob() *bq.Job {
func TestCopy(t *testing.T) {
defer fixRandomID("RANDOM")()
testCases := []struct {
dst *Table
srcs []*Table
jobID string
config CopyConfig
want *bq.Job
dst *Table
srcs []*Table
jobID string
location string
config CopyConfig
want *bq.Job
}{
{
dst: &Table{
@ -118,12 +119,33 @@ func TestCopy(t *testing.T) {
return j
}(),
},
{
dst: &Table{
ProjectID: "d-project-id",
DatasetID: "d-dataset-id",
TableID: "d-table-id",
},
srcs: []*Table{
{
ProjectID: "s-project-id",
DatasetID: "s-dataset-id",
TableID: "s-table-id",
},
},
location: "asia-northeast1",
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference.Location = "asia-northeast1"
return j
}(),
},
}
c := &Client{projectID: "client-project-id"}
for i, tc := range testCases {
tc.dst.c = c
copier := tc.dst.CopierFrom(tc.srcs...)
copier.JobID = tc.jobID
copier.Location = tc.location
tc.config.Srcs = tc.srcs
tc.config.Dst = tc.dst
copier.CopyConfig = tc.config

@ -20,6 +20,7 @@ import (
"time"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -85,12 +86,19 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := md.toBQ()
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
// Use Client.Location as a default.
if ds.Location == "" {
ds.Location = d.c.Location
}
call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
setClientHeader(call.Header())
_, err = call.Do()
@ -140,14 +148,20 @@ func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
}
// Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error {
func (d *Dataset) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
return call.Do()
}
// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
setClientHeader(call.Header())
var ds *bq.Dataset
@ -186,7 +200,10 @@ func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
defer func() { trace.EndSpan(ctx, err) }()
ds, err := dm.toBQ()
if err != nil {
return nil, err

@ -55,7 +55,7 @@ func TestDataTransferServiceSmoke(t *testing.T) {
t.Fatal(err)
}
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", projectId, "us-central1")
var formattedParent string = fmt.Sprintf("projects/%s", projectId)
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}

@ -294,7 +294,7 @@ func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.Li
return it
}
// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.

@ -42,8 +42,6 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
}
}

@ -281,7 +281,7 @@ func TestDataTransferServiceGetDataSource(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
var request = &datatransferpb.GetDataSourceRequest{
Name: formattedName,
}
@ -310,7 +310,7 @@ func TestDataTransferServiceGetDataSourceError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
var request = &datatransferpb.GetDataSourceRequest{
Name: formattedName,
}
@ -343,7 +343,7 @@ func TestDataTransferServiceListDataSources(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}
@ -382,7 +382,7 @@ func TestDataTransferServiceListDataSourcesError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &datatransferpb.ListDataSourcesRequest{
Parent: formattedParent,
}
@ -428,7 +428,7 @@ func TestDataTransferServiceCreateTransferConfig(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
var request = &datatransferpb.CreateTransferConfigRequest{
Parent: formattedParent,
@ -459,7 +459,7 @@ func TestDataTransferServiceCreateTransferConfigError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
var request = &datatransferpb.CreateTransferConfigRequest{
Parent: formattedParent,
@ -567,7 +567,7 @@ func TestDataTransferServiceDeleteTransferConfig(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.DeleteTransferConfigRequest{
Name: formattedName,
}
@ -593,7 +593,7 @@ func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.DeleteTransferConfigRequest{
Name: formattedName,
}
@ -638,7 +638,7 @@ func TestDataTransferServiceGetTransferConfig(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.GetTransferConfigRequest{
Name: formattedName,
}
@ -667,7 +667,7 @@ func TestDataTransferServiceGetTransferConfigError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.GetTransferConfigRequest{
Name: formattedName,
}
@ -700,7 +700,7 @@ func TestDataTransferServiceListTransferConfigs(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &datatransferpb.ListTransferConfigsRequest{
Parent: formattedParent,
}
@ -739,7 +739,7 @@ func TestDataTransferServiceListTransferConfigsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
var formattedParent string = fmt.Sprintf("projects/%s", "[PROJECT]")
var request = &datatransferpb.ListTransferConfigsRequest{
Parent: formattedParent,
}
@ -766,7 +766,7 @@ func TestDataTransferServiceScheduleTransferRuns(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
var request = &datatransferpb.ScheduleTransferRunsRequest{
@ -799,7 +799,7 @@ func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
var request = &datatransferpb.ScheduleTransferRunsRequest{
@ -841,7 +841,7 @@ func TestDataTransferServiceGetTransferRun(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.GetTransferRunRequest{
Name: formattedName,
}
@ -870,7 +870,7 @@ func TestDataTransferServiceGetTransferRunError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.GetTransferRunRequest{
Name: formattedName,
}
@ -897,7 +897,7 @@ func TestDataTransferServiceDeleteTransferRun(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.DeleteTransferRunRequest{
Name: formattedName,
}
@ -923,7 +923,7 @@ func TestDataTransferServiceDeleteTransferRunError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedName string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.DeleteTransferRunRequest{
Name: formattedName,
}
@ -955,7 +955,7 @@ func TestDataTransferServiceListTransferRuns(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.ListTransferRunsRequest{
Parent: formattedParent,
}
@ -994,7 +994,7 @@ func TestDataTransferServiceListTransferRunsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s", "[PROJECT]", "[TRANSFER_CONFIG]")
var request = &datatransferpb.ListTransferRunsRequest{
Parent: formattedParent,
}
@ -1027,7 +1027,7 @@ func TestDataTransferServiceListTransferLogs(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.ListTransferLogsRequest{
Parent: formattedParent,
}
@ -1066,7 +1066,7 @@ func TestDataTransferServiceListTransferLogsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
var formattedParent string = fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[TRANSFER_CONFIG]", "[RUN]")
var request = &datatransferpb.ListTransferLogsRequest{
Parent: formattedParent,
}
@ -1096,7 +1096,7 @@ func TestDataTransferServiceCheckValidCreds(t *testing.T) {
mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
var request = &datatransferpb.CheckValidCredsRequest{
Name: formattedName,
}
@ -1125,7 +1125,7 @@ func TestDataTransferServiceCheckValidCredsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDataTransfer.err = gstatus.Error(errCode, "test error")
var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
var formattedName string = fmt.Sprintf("projects/%s/dataSources/%s", "[PROJECT]", "[DATA_SOURCE]")
var request = &datatransferpb.CheckValidCredsRequest{
Name: formattedName,
}

@ -15,6 +15,7 @@
package bigquery
import (
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -93,13 +94,16 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
}
// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
defer func() { trace.EndSpan(ctx, err) }()
return e.c.insertJob(ctx, e.newJob(), nil)
}
func (e *Extractor) newJob() *bq.Job {
return &bq.Job{
JobReference: e.JobIDConfig.createJobRef(e.c.projectID),
JobReference: e.JobIDConfig.createJobRef(e.c),
Configuration: e.ExtractConfig.toBQ(),
}
}

@ -59,6 +59,9 @@ var (
tableIDs = testutil.NewUIDSpaceSep("table", '_')
)
// Note: integration tests cannot be run in parallel, because TestIntegration_Location
// modifies the client.
func TestMain(m *testing.M) {
cleanup := initIntegrationTest()
r := m.Run()
@ -943,23 +946,23 @@ func TestIntegration_TableUpdate(t *testing.T) {
// Error cases when updating schema.
for _, test := range []struct {
desc string
fields []*FieldSchema
fields Schema
}{
{"change from optional to required", []*FieldSchema{
{"change from optional to required", Schema{
{Name: "name", Type: StringFieldType, Required: true},
schema3[1],
schema3[2],
schema3[3],
}},
{"add a required field", []*FieldSchema{
{"add a required field", Schema{
schema3[0], schema3[1], schema3[2], schema3[3],
{Name: "req", Type: StringFieldType, Required: true},
}},
{"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}},
{"remove a nested field", []*FieldSchema{
{"remove a field", Schema{schema3[0], schema3[1], schema3[2]}},
{"remove a nested field", Schema{
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
{"remove all nested fields", []*FieldSchema{
{"remove all nested fields", Schema{
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
} {
@ -1603,6 +1606,117 @@ func TestIntegration_ListJobs(t *testing.T) {
}
}
const tokyo = "asia-northeast1"
// TestIntegration_Location runs the location tests twice: first with no
// client default location and an explicit per-request location, then with
// the client default set and empty per-request locations.
// Note: it mutates client.Location, so it must not run in parallel with
// other integration tests.
func TestIntegration_Location(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	// Pass 1: explicit location on every request, no client default.
	client.Location = ""
	testLocation(t, tokyo)
	// Pass 2: rely on the client default; restore it when done.
	client.Location = tokyo
	defer func() { client.Location = "" }()
	testLocation(t, "")
}
// testLocation exercises dataset, load, query, copy and extract operations
// against the Tokyo location. loc is assigned to each request's Location
// field; when loc is empty the test relies on the client-level default set
// by the caller (TestIntegration_Location).
func testLocation(t *testing.T, loc string) {
	ctx := context.Background()
	tokyoDataset := client.Dataset("tokyo")
	err := tokyoDataset.Create(ctx, &DatasetMetadata{Location: loc})
	if err != nil && !hasStatusCode(err, 409) { // 409 = already exists
		t.Fatal(err)
	}
	md, err := tokyoDataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// The dataset is created in Tokyo on the first pass; later passes see the
	// existing dataset, so its location is always tokyo.
	if md.Location != tokyo {
		t.Fatalf("dataset location: got %s, want %s", md.Location, tokyo)
	}
	table := tokyoDataset.Table(tableIDs.New())
	err = table.Create(ctx, &TableMetadata{
		Schema: Schema{
			{Name: "name", Type: StringFieldType},
			{Name: "nums", Type: IntegerFieldType},
		},
		ExpirationTime: testTableExpiration,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer table.Delete(ctx)
	loader := table.LoaderFrom(NewReaderSource(strings.NewReader("a,0\nb,1\nc,2\n")))
	loader.Location = loc
	job, err := loader.Run(ctx)
	if err != nil {
		t.Fatal("loader.Run", err)
	}
	if job.Location() != tokyo {
		t.Fatalf("job location: got %s, want %s", job.Location(), tokyo)
	}
	// JobFromID uses the client's default location; it succeeds only when
	// that default matches the job's actual location.
	_, err = client.JobFromID(ctx, job.ID())
	if client.Location == "" && err == nil {
		t.Error("JobFromID with Tokyo job, no client location: want error, got nil")
	}
	if client.Location != "" && err != nil {
		t.Errorf("JobFromID with Tokyo job, with client location: want nil, got %v", err)
	}
	// An explicitly wrong location must fail regardless of the client default.
	_, err = client.JobFromIDLocation(ctx, job.ID(), "US")
	if err == nil {
		t.Error("JobFromIDLocation with US: want error, got nil")
	}
	job2, err := client.JobFromIDLocation(ctx, job.ID(), loc)
	if loc == tokyo && err != nil {
		t.Errorf("loc=tokyo: %v", err)
	}
	if loc == "" && err == nil {
		t.Error("loc empty: got nil, want error")
	}
	if job2 != nil && (job2.ID() != job.ID() || job2.Location() != tokyo) {
		t.Errorf("got id %s loc %s, want id %s loc %s", job2.ID(), job2.Location(), job.ID(), tokyo)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}
	// Cancel should succeed even if the job is done.
	if err := job.Cancel(ctx); err != nil {
		t.Fatal(err)
	}
	q := client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	q.Location = loc
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantRows := [][]Value{
		{"a", int64(0)},
		{"b", int64(1)},
		{"c", int64(2)},
	}
	checkRead(t, "location", iter, wantRows)
	table2 := tokyoDataset.Table(tableIDs.New())
	copier := table2.CopierFrom(table)
	copier.Location = loc
	if _, err := copier.Run(ctx); err != nil {
		t.Fatal(err)
	}
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	e.Location = loc
	if _, err := e.Run(ctx); err != nil {
		t.Fatal(err)
	}
}
// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
table := dataset.Table(tableIDs.New())

@ -23,6 +23,7 @@ import (
"time"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/trace"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -35,6 +36,7 @@ type Job struct {
c *Client
projectID string
jobID string
location string
config *bq.JobConfiguration
lastStatus *JobStatus
@ -43,8 +45,21 @@ type Job struct {
// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
return c.JobFromIDLocation(ctx, id, c.Location)
}
// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
if err != nil {
return nil, err
}
@ -56,6 +71,11 @@ func (j *Job) ID() string {
return j.jobID
}
// Location returns the job's location. It is populated from the job's
// JobReference as returned by the BigQuery service, and may be empty.
func (j *Job) Location() string {
	return j.location
}
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int
@ -120,14 +140,20 @@ type JobIDConfig struct {
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Location is the location for the job.
Location string
}
// createJobRef creates a JobReference.
// projectID must be non-empty.
func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference {
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
jr := &bq.JobReference{ProjectId: projectID}
loc := j.Location
if loc == "" { // Use Client.Location as a default.
loc = c.Location
}
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc}
if j.JobID == "" {
jr.JobId = randomIDFn()
} else if j.AddJobIDSuffix {
@ -175,8 +201,11 @@ func (s *JobStatus) Err() error {
}
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
defer func() { trace.EndSpan(ctx, err) }()
bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
if err != nil {
return nil, err
}
@ -204,6 +233,7 @@ func (j *Job) Cancel(ctx context.Context) error {
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
Location(j.location).
Fields(). // We don't need any of the response data.
Context(ctx)
setClientHeader(call.Header())
@ -218,7 +248,10 @@ func (j *Job) Cancel(ctx context.Context) error {
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
defer func() { trace.EndSpan(ctx, err) }()
if j.isQuery() {
// We can avoid polling for query jobs.
if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
@ -232,8 +265,7 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
return js, nil
}
// Non-query jobs must poll.
var js *JobStatus
err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
js, err = j.Status(ctx)
if err != nil {
return true, err
@ -251,7 +283,10 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
defer func() { trace.EndSpan(ctx, err) }()
return j.read(ctx, j.waitForQuery, fetchPage)
}
@ -281,7 +316,7 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin
// waitForQuery waits for the query job to complete and returns its schema.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
// Use GetQueryResults only to wait for completion, not to read results.
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
setClientHeader(call.Header())
backoff := gax.Backoff{
Initial: 1 * time.Second,
@ -525,9 +560,12 @@ func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) {
var job *bq.Job
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
if location != "" {
call = call.Location(location)
}
if len(fields) > 0 {
call = call.Fields(fields...)
}
@ -550,6 +588,7 @@ func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt
j := &Job{
projectID: qr.ProjectId,
jobID: qr.JobId,
location: qr.Location,
c: c,
}
j.setConfig(qc)

@ -23,37 +23,52 @@ import (
func TestCreateJobRef(t *testing.T) {
defer fixRandomID("RANDOM")()
cNoLoc := &Client{projectID: "projectID"}
cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
for _, test := range []struct {
jobID string
addJobIDSuffix bool
want string
in JobIDConfig
client *Client
want *bq.JobReference
}{
{
jobID: "foo",
addJobIDSuffix: false,
want: "foo",
in: JobIDConfig{JobID: "foo"},
want: &bq.JobReference{JobId: "foo"},
},
{
jobID: "",
addJobIDSuffix: false,
want: "RANDOM",
in: JobIDConfig{},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
jobID: "",
addJobIDSuffix: true, // irrelevant
want: "RANDOM",
in: JobIDConfig{AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "RANDOM"},
},
{
jobID: "foo",
addJobIDSuffix: true,
want: "foo-RANDOM",
in: JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
want: &bq.JobReference{JobId: "foo-RANDOM"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
{
in: JobIDConfig{JobID: "foo"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
},
{
in: JobIDConfig{JobID: "foo", Location: "loc"},
client: cLoc,
want: &bq.JobReference{JobId: "foo", Location: "loc"},
},
} {
jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix}
jr := jc.createJobRef("projectID")
got := jr.JobId
if got != test.want {
t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)
client := test.client
if client == nil {
client = cNoLoc
}
got := test.in.createJobRef(client)
test.want.ProjectId = "projectID"
if !testutil.Equal(got, test.want) {
t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
}
}
}

@ -17,6 +17,7 @@ package bigquery
import (
"io"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -45,6 +46,10 @@ type LoadConfig struct {
// Custom encryption configuration (e.g., Cloud KMS keys).
DestinationEncryptionConfig *EncryptionConfig
// SchemaUpdateOptions allows the schema of the destination table to be
// updated as a side effect of the load job.
SchemaUpdateOptions []string
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
@ -56,6 +61,7 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
DestinationTable: l.Dst.toBQ(),
TimePartitioning: l.TimePartitioning.toBQ(),
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
SchemaUpdateOptions: l.SchemaUpdateOptions,
},
}
media := l.Src.populateLoadConfig(config.Load)
@ -70,6 +76,7 @@ func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
Dst: bqToTable(q.Load.DestinationTable, c),
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning),
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
SchemaUpdateOptions: q.Load.SchemaUpdateOptions,
}
var fc *FileConfig
if len(q.Load.SourceUris) == 0 {
@ -117,7 +124,10 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {
}
// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, media := l.newJob()
return l.c.insertJob(ctx, job, media)
}
@ -125,7 +135,7 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) {
func (l *Loader) newJob() (*bq.Job, io.Reader) {
config, media := l.LoadConfig.toBQ()
return &bq.Job{
JobReference: l.JobIDConfig.createJobRef(l.c.projectID),
JobReference: l.JobIDConfig.createJobRef(l.c),
Configuration: config,
}, media
}

@ -74,17 +74,28 @@ func TestLoad(t *testing.T) {
c := &Client{projectID: "client-project-id"}
testCases := []struct {
dst *Table
src LoadSource
jobID string
config LoadConfig
want *bq.Job
dst *Table
src LoadSource
jobID string
location string
config LoadConfig
want *bq.Job
}{
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
want: defaultLoadJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: NewGCSReference("uri"),
location: "loc",
want: func() *bq.Job {
j := defaultLoadJob()
j.JobReference.Location = "loc"
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
jobID: "ajob",
@ -94,6 +105,7 @@ func TestLoad(t *testing.T) {
Labels: map[string]string{"a": "b"},
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond},
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"},
},
src: NewGCSReference("uri"),
want: func() *bq.Job {
@ -110,6 +122,7 @@ func TestLoad(t *testing.T) {
JobId: "ajob",
ProjectId: "client-project-id",
}
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"}
return j
}(),
},
@ -226,6 +239,7 @@ func TestLoad(t *testing.T) {
for i, tc := range testCases {
loader := tc.dst.LoaderFrom(tc.src)
loader.JobID = tc.jobID
loader.Location = tc.location
tc.config.Src = tc.src
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config

40
vendor/cloud.google.com/go/bigquery/oc_test.go generated vendored Normal file

@ -0,0 +1,40 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package bigquery
import (
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
)
// TestOCTracing checks that running a query records at least one trace span
// with the test exporter registered.
func TestOCTracing(t *testing.T) {
	ctx := context.Background()
	client := getClient(t)
	defer client.Close()

	exporter := testutil.NewTestExporter()
	defer exporter.Unregister()

	q := client.Query("select *")
	_, _ = q.Run(ctx) // Doesn't matter if we get an error; span should be created either way
	if len(exporter.Spans) == 0 {
		t.Fatalf("Expected some spans to be created, but got %d", 0)
	}
}

@ -17,6 +17,7 @@ package bigquery
import (
"errors"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -260,12 +261,15 @@ func (c *Client) Query(q string) *Query {
}
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
func (q *Query) Run(ctx context.Context) (j *Job, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run")
defer func() { trace.EndSpan(ctx, err) }()
job, err := q.newJob()
if err != nil {
return nil, err
}
j, err := q.client.insertJob(ctx, job, nil)
j, err = q.client.insertJob(ctx, job, nil)
if err != nil {
return nil, err
}
@ -278,7 +282,7 @@ func (q *Query) newJob() (*bq.Job, error) {
return nil, err
}
return &bq.Job{
JobReference: q.JobIDConfig.createJobRef(q.client.projectID),
JobReference: q.JobIDConfig.createJobRef(q.client),
Configuration: config,
}, nil
}

@ -120,9 +120,7 @@ func TestQuery(t *testing.T) {
g.MaxBadRecords = 1
g.Quote = "'"
g.SkipLeadingRows = 2
g.Schema = Schema([]*FieldSchema{
{Name: "name", Type: StringFieldType},
})
g.Schema = Schema{{Name: "name", Type: StringFieldType}}
return g
}(),
},

@ -20,7 +20,6 @@ import (
"reflect"
"cloud.google.com/go/internal/atomiccache"
bq "google.golang.org/api/bigquery/v2"
)

@ -179,7 +179,7 @@ func TestSchemaConversion(t *testing.T) {
Name: "outer",
Required: true,
Type: "RECORD",
Schema: []*FieldSchema{
Schema: Schema{
{
Description: "inner field",
Name: "inner",

@ -19,6 +19,7 @@ import (
"fmt"
"time"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"cloud.google.com/go/internal/optional"
@ -242,7 +243,10 @@ func (t *Table) implicitTable() bool {
// Expiration can only be set during table creation.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create")
defer func() { trace.EndSpan(ctx, err) }()
table, err := tm.toBQ()
if err != nil {
return err
@ -323,11 +327,14 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) {
}
// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
var table *bq.Table
err := runWithRetry(ctx, func() (err error) {
err = runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
@ -378,7 +385,10 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
}
// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
func (t *Table) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
@ -394,7 +404,10 @@ func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
}
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
defer func() { trace.EndSpan(ctx, err) }()
bqt := tm.toBQ()
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
setClientHeader(call.Header())

@ -19,6 +19,7 @@ import (
"fmt"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -80,7 +81,10 @@ func (t *Table) Uploader() *Uploader {
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put")
defer func() { trace.EndSpan(ctx, err) }()
savers, err := valueSavers(src)
if err != nil {
return err

@ -541,6 +541,7 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
type StructSaver struct {
// Schema determines what fields of the struct are uploaded. It should
// match the table's schema.
// Schema is optional for StructSavers that are passed to Uploader.Put.
Schema Schema
// If non-empty, BigQuery will use InsertID to de-duplicate insertions

@ -30,7 +30,7 @@ import (
)
func TestConvertBasicValues(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
{Type: FloatFieldType},
@ -57,7 +57,7 @@ func TestConvertBasicValues(t *testing.T) {
}
func TestConvertTime(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{Type: TimestampFieldType},
{Type: DateFieldType},
{Type: TimeFieldType},
@ -103,9 +103,7 @@ func TestConvertSmallTimes(t *testing.T) {
}
func TestConvertNullValues(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},
}
schema := Schema{{Type: StringFieldType}}
row := &bq.TableRow{
F: []*bq.TableCell{
{V: nil},
@ -122,7 +120,7 @@ func TestConvertNullValues(t *testing.T) {
}
func TestBasicRepetition(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{Type: IntegerFieldType, Repeated: true},
}
row := &bq.TableRow{
@ -153,7 +151,7 @@ func TestBasicRepetition(t *testing.T) {
}
func TestNestedRecordContainingRepetition(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{
Type: RecordFieldType,
Schema: Schema{
@ -190,7 +188,7 @@ func TestNestedRecordContainingRepetition(t *testing.T) {
}
func TestRepeatedRecordContainingRepetition(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{
Type: RecordFieldType,
Repeated: true,
@ -264,7 +262,7 @@ func TestRepeatedRecordContainingRepetition(t *testing.T) {
}
func TestRepeatedRecordContainingRecord(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{
Type: RecordFieldType,
Repeated: true,
@ -399,7 +397,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
}{
{
vs: ValuesSaver{
Schema: []*FieldSchema{
Schema: Schema{
{Name: "intField", Type: IntegerFieldType},
{Name: "strField", Type: StringFieldType},
{Name: "dtField", Type: DateTimeFieldType},
@ -417,12 +415,12 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
},
{
vs: ValuesSaver{
Schema: []*FieldSchema{
Schema: Schema{
{Name: "intField", Type: IntegerFieldType},
{
Name: "recordField",
Type: RecordFieldType,
Schema: []*FieldSchema{
Schema: Schema{
{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
},
},
@ -632,7 +630,7 @@ func TestStructSaverErrors(t *testing.T) {
}
func TestConvertRows(t *testing.T) {
schema := []*FieldSchema{
schema := Schema{
{Type: StringFieldType},
{Type: IntegerFieldType},
{Type: FloatFieldType},
@ -1072,7 +1070,7 @@ func TestStructLoaderErrors(t *testing.T) {
t.Errorf("%T: got nil, want error", bad6)
}
// sl.set's error is sticky, with even good input.
// sl.set's error is sticky, even with good input.
err2 := sl.set(&repStruct{}, repSchema)
if err2 != err {
t.Errorf("%v != %v, expected equal", err2, err)
@ -1090,6 +1088,7 @@ func TestStructLoaderErrors(t *testing.T) {
{Name: "b", Type: BooleanFieldType},
{Name: "s", Type: StringFieldType},
{Name: "d", Type: DateFieldType},
{Name: "r", Type: RecordFieldType, Schema: Schema{{Name: "X", Type: IntegerFieldType}}},
}
type s struct {
I int

@ -106,10 +106,17 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
req := &btapb.ListTablesRequest{
Parent: prefix,
}
res, err := ac.tClient.ListTables(ctx, req)
var res *btapb.ListTablesResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
res, err = ac.tClient.ListTables(ctx, req)
return err
}, retryOptions...)
if err != nil {
return nil, err
}
names := make([]string, 0, len(res.Tables))
for _, tbl := range res.Tables {
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
@ -227,10 +234,18 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo,
req := &btapb.GetTableRequest{
Name: prefix + "/tables/" + table,
}
res, err := ac.tClient.GetTable(ctx, req)
var res *btapb.Table
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
res, err = ac.tClient.GetTable(ctx, req)
return err
}, retryOptions...)
if err != nil {
return nil, err
}
ti := &TableInfo{}
for name, fam := range res.ColumnFamilies {
ti.Families = append(ti.Families, name)

@ -33,6 +33,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const prodAddr = "bigtable.googleapis.com:443"
@ -83,6 +84,7 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: btpb.NewBigtableClient(conn),
@ -148,7 +150,11 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
ctx = mergeOutgoingMetadata(ctx, t.md)
var prevRowKey string
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
defer func() { traceEndSpan(ctx, err) }()
attrMap := make(map[string]interface{})
err = gax.Invoke(ctx, func(ctx context.Context) error {
if !arg.valid() {
// Empty row set, no need to make an API call.
// NOTE: we must return early if arg == RowList{} because reading
@ -166,6 +172,7 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
ctx, cancel := context.WithCancel(ctx) // for aborting the stream
defer cancel()
startTime := time.Now()
stream, err := t.c.client.ReadRows(ctx, req)
if err != nil {
return err
@ -179,6 +186,10 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts
if err != nil {
// Reset arg for next Invoke call.
arg = arg.retainRowsAfter(prevRowKey)
attrMap["rowKey"] = prevRowKey
attrMap["error"] = err.Error()
attrMap["time_secs"] = time.Since(startTime).Seconds()
tracePrintf(ctx, attrMap, "Retry details in ReadRows")
return err
}
@ -463,6 +474,9 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
}
}
var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
defer func() { traceEndSpan(ctx, err) }()
var callOptions []gax.CallOption
if m.cond == nil {
req := &btpb.MutateRowRequest{
@ -508,7 +522,7 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...Appl
callOptions = retryOptions
}
var cmRes *btpb.CheckAndMutateRowResponse
err := gax.Invoke(ctx, func(ctx context.Context) error {
err = gax.Invoke(ctx, func(ctx context.Context) error {
var err error
cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
return err
@ -643,7 +657,13 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
// entries will be reduced after each invocation to just what needs to be retried.
entries := make([]*entryErr, len(rowKeys))
copy(entries, origEntries)
err := gax.Invoke(ctx, func(ctx context.Context) error {
var err error
ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
defer func() { traceEndSpan(ctx, err) }()
attrMap := make(map[string]interface{})
err = gax.Invoke(ctx, func(ctx context.Context) error {
attrMap["rowCount"] = len(entries)
tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
err := t.doApplyBulk(ctx, entries, opts...)
if err != nil {
// We want to retry the entire request with the current entries
@ -653,11 +673,10 @@ func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutatio
if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
// We have at least one mutation that needs to be retried.
// Return an arbitrary error that is retryable according to callOptions.
return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
}
return nil
}, retryOptions...)
if err != nil {
return nil, err
}
@ -722,11 +741,11 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...
}
for i, entry := range res.Entries {
status := entry.Status
if status.Code == int32(codes.OK) {
s := entry.Status
if s.Code == int32(codes.OK) {
entryErrs[i].Err = nil
} else {
entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message)
}
}
after(res)
@ -826,3 +845,40 @@ func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context
mdCopy, _ := metadata.FromOutgoingContext(ctx)
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
}
// SampleRowKeys returns a sample of row keys in the table. The returned row
// keys delimit contiguous sections of the table of approximately equal size;
// an empty key returned by the server is skipped. The whole call is retried
// via gax.Invoke, and each retry attempt starts the sample from scratch.
func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	var keys []string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		// Reset on every attempt so a retried call doesn't accumulate
		// keys from a partially-consumed earlier stream.
		keys = nil
		streamCtx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()
		req := &btpb.SampleRowKeysRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
		}
		stream, err := t.c.client.SampleRowKeys(streamCtx, req)
		if err != nil {
			return err
		}
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				return nil
			}
			if err != nil {
				return err
			}
			if key := string(res.RowKey); key != "" {
				keys = append(keys, key)
			}
		}
	}, retryOptions...)
	return keys, err
}

@ -1063,3 +1063,101 @@ func clearTimestamps(r Row) {
}
}
}
// TestSampleRowKeys writes a few rows and verifies that Table.SampleRowKeys
// returns a non-empty set of sample keys.
func TestSampleRowKeys(t *testing.T) {
	start := time.Now()
	lastCheckpoint := start
	// checkpoint logs elapsed time since the test started and since the
	// previous checkpoint, for diagnosing slow integration runs.
	checkpoint := func(s string) {
		n := time.Now()
		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
		lastCheckpoint = n
	}
	ctx := context.Background()
	client, adminClient, table, err := doSetup(ctx)
	if err != nil {
		t.Fatalf("%v", err)
	}
	defer client.Close()
	defer adminClient.Close()
	tbl := client.Open(table)
	// Delete the table at the end of the test.
	// Do this even before creating the table so that if this is running
	// against production and CreateTable fails there's a chance of cleaning it up.
	defer adminClient.DeleteTable(ctx, table)

	// Insert some data.
	initialData := map[string][]string{
		"wmckinley11":   {"tjefferson11"},
		"gwashington77": {"jadams77"},
		"tjefferson0":   {"gwashington0", "jadams0"},
	}
	for row, ss := range initialData {
		mut := NewMutation()
		for _, name := range ss {
			mut.Set("follows", name, 0, []byte("1"))
		}
		if err := tbl.Apply(ctx, row, mut); err != nil {
			t.Errorf("Mutating row %q: %v", row, err)
		}
	}
	checkpoint("inserted initial data")

	// Use the test's ctx rather than a fresh context.Background(), so the
	// call observes any cancellation/deadline applied to the test context.
	sampleKeys, err := tbl.SampleRowKeys(ctx)
	if err != nil {
		t.Errorf("%s: %v", "SampleRowKeys:", err)
	}
	if len(sampleKeys) == 0 {
		t.Error("SampleRowKeys length 0")
	}
	checkpoint("tested SampleRowKeys.")
}
// doSetup dials the integration test environment, creates the test table and
// its "follows" column family, and returns the data client, admin client, and
// table name. The caller is responsible for closing both clients and deleting
// the table.
func doSetup(ctx context.Context) (*Client, *AdminClient, string, error) {
	start := time.Now()
	lastCheckpoint := start
	// checkpoint logs elapsed time since setup started and since the
	// previous checkpoint. fmt.Printf does not append a newline, so one is
	// included explicitly to keep each message on its own line.
	checkpoint := func(s string) {
		n := time.Now()
		fmt.Printf("[%s] %v since start, %v since last checkpoint\n", s, n.Sub(start), n.Sub(lastCheckpoint))
		lastCheckpoint = n
	}
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		return nil, nil, "", fmt.Errorf("IntegrationEnv: %v", err)
	}
	var timeout time.Duration
	if testEnv.Config().UseProd {
		timeout = 10 * time.Minute
		fmt.Println("Running test against production")
	} else {
		timeout = 1 * time.Minute
		fmt.Printf("bttest.Server running on %s\n", testEnv.Config().AdminEndpoint)
	}
	// The timeout context governs only the setup RPCs below; it is not
	// returned to the caller, so canceling it on return is safe.
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	client, err := testEnv.NewClient()
	if err != nil {
		return nil, nil, "", fmt.Errorf("Client: %v", err)
	}
	checkpoint("dialed Client")
	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		return nil, nil, "", fmt.Errorf("AdminClient: %v", err)
	}
	checkpoint("dialed AdminClient")
	table := testEnv.Config().Table
	if err := adminClient.CreateTable(ctx, table); err != nil {
		return nil, nil, "", fmt.Errorf("Creating table: %v", err)
	}
	checkpoint("created table")
	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
		return nil, nil, "", fmt.Errorf("Creating column family: %v", err)
	}
	checkpoint(`created "follows" column family`)
	return client, adminClient, table, nil
}

@ -121,7 +121,7 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest)
s.mu.Lock()
if _, ok := s.tables[tbl]; ok {
s.mu.Unlock()
return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
return nil, status.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
}
s.tables[tbl] = newTable(req)
s.mu.Unlock()
@ -151,7 +151,7 @@ func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*bta
tblIns, ok := s.tables[tbl]
s.mu.Unlock()
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl)
return nil, status.Errorf(codes.NotFound, "table %q not found", tbl)
}
return &btapb.Table{
@ -177,7 +177,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
tbl, ok := s.tables[req.Name]
s.mu.Unlock()
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
}
tbl.mu.Lock()
@ -186,7 +186,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
for _, mod := range req.Modifications {
if create := mod.GetCreate(); create != nil {
if _, ok := tbl.families[mod.Id]; ok {
return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
return nil, status.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
}
newcf := &columnFamily{
name: req.Name + "/columnFamilies/" + mod.Id,

@ -1074,12 +1074,12 @@ func doSet(ctx context.Context, args ...string) {
func doSetGCPolicy(ctx context.Context, args ...string) {
if len(args) < 3 {
log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> | maxage=<d> (and|or) maxversions=<n> )")
}
table := args[0]
fam := args[1]
pol, err := parseGCPolicy(args[2])
pol, err := parseGCPolicy(strings.Join(args[2:], " "))
if err != nil {
log.Fatal(err)
}
@ -1101,24 +1101,55 @@ func doWaitForReplicaiton(ctx context.Context, args ...string) {
}
func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) {
var pol bigtable.GCPolicy
switch p := policyStr; {
case strings.HasPrefix(p, "maxage="):
d, err := parseDuration(p[7:])
words := strings.Fields(policyStr)
switch len(words) {
case 1:
return parseSinglePolicy(words[0])
case 3:
p1, err := parseSinglePolicy(words[0])
if err != nil {
return nil, err
}
pol = bigtable.MaxAgePolicy(d)
case strings.HasPrefix(p, "maxversions="):
n, err := strconv.ParseUint(p[12:], 10, 16)
p2, err := parseSinglePolicy(words[2])
if err != nil {
return nil, err
}
pol = bigtable.MaxVersionsPolicy(int(n))
switch words[1] {
case "and":
return bigtable.IntersectionPolicy(p1, p2), nil
case "or":
return bigtable.UnionPolicy(p1, p2), nil
default:
return nil, fmt.Errorf("Expected 'and' or 'or', saw %q", words[1])
}
default:
return nil, fmt.Errorf("Bad GC policy %q", p)
return nil, fmt.Errorf("Expected '1' or '3' parameter count, saw %d", len(words))
}
return pol, nil
return nil, nil
}
// parseSinglePolicy parses one "name=value" GC policy clause. The name must
// be "maxage" (value is a duration such as "1h") or "maxversions" (value is
// an unsigned integer that fits in 16 bits).
func parseSinglePolicy(s string) (bigtable.GCPolicy, error) {
	words := strings.Split(s, "=")
	if len(words) != 2 {
		return nil, fmt.Errorf("Expected 'name=value', got %q", words)
	}
	switch words[0] {
	case "maxage":
		d, err := parseDuration(words[1])
		if err != nil {
			return nil, err
		}
		return bigtable.MaxAgePolicy(d), nil
	case "maxversions":
		n, err := strconv.ParseUint(words[1], 10, 16)
		if err != nil {
			return nil, err
		}
		return bigtable.MaxVersionsPolicy(int(n)), nil
	default:
		// Report the unrecognized policy *name* (words[0]); the original
		// quoted words[1], which showed the value instead of the name.
		return nil, fmt.Errorf("Expected 'maxage' or 'maxversions', got %q", words[0])
	}
	// The trailing unreachable "return nil, nil" has been removed: every
	// switch arm (including default) returns.
}
func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) {

@ -17,6 +17,9 @@ package main
import (
"testing"
"time"
"cloud.google.com/go/bigtable"
"github.com/google/go-cmp/cmp"
)
func TestParseDuration(t *testing.T) {
@ -57,3 +60,54 @@ func TestParseDuration(t *testing.T) {
}
}
}
// TestParseGCPolicy exercises parseGCPolicy with valid single clauses,
// "and"/"or" combinations, and a range of malformed inputs that must fail.
func TestParseGCPolicy(t *testing.T) {
	tests := []struct {
		in   string
		out  bigtable.GCPolicy
		fail bool
	}{
		{in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)},
		{in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))},
		{in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
		{in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)},
		{in: "maxage=1", fail: true},
		{in: "maxage = 1h", fail: true},
		{in: "maxage =1h", fail: true},
		{in: "maxage= 1h", fail: true},
		{in: "foomaxage=1h", fail: true},
		{in: "maxversions=1h", fail: true},
		{in: "maxversions= 1", fail: true},
		{in: "maxversions = 1", fail: true},
		{in: "maxversions =1", fail: true},
		{in: "barmaxversions=1", fail: true},
		{in: "maxage = 1h or maxversions=1h", fail: true},
		{in: "foomaxversions=2 or maxage=1h", fail: true},
		{in: "maxversions=2 or barmaxage=1h", fail: true},
		{in: "foomaxversions=2 or barmaxage=1h", fail: true},
		{in: "maxage = 1h and maxversions=1h", fail: true},
		{in: "foomaxage=1h and maxversions=1", fail: true},
		{in: "maxage=1h and barmaxversions=1", fail: true},
		{in: "foomaxage=1h and barmaxversions=1", fail: true},
	}
	for _, tc := range tests {
		got, err := parseGCPolicy(tc.in)
		if !tc.fail && err != nil {
			t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err)
			continue
		}
		if tc.fail && err == nil {
			t.Errorf("parseGCPolicy(%q) did not fail", tc.in)
			continue
		}
		if tc.fail {
			continue
		}
		// Intersection/Union policies contain unexported fields, so cmp
		// must be told to look inside them.
		var cmpOpts cmp.Options
		cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...)))
		if !cmp.Equal(got, tc.out, cmpOpts) {
			// Fixed format string: the original "=%v" lacked a space,
			// producing "parseGCPolicy(...) =got".
			t.Errorf("parseGCPolicy(%q) = %v, want %v", tc.in, got, tc.out)
		}
	}
}

68
vendor/cloud.google.com/go/bigtable/go18.go generated vendored Normal file

@ -0,0 +1,68 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package bigtable
import (
"fmt"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
// openCensusOptions returns client options that install the OpenCensus gRPC
// stats handler on the client connection (go1.8+ builds only).
func openCensusOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
	}
}
// traceStartSpan starts a new trace span with the given name and returns a
// context carrying it. Pair with traceEndSpan to close the span; the span
// handle itself is deliberately discarded since traceEndSpan recovers it
// from the context.
func traceStartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}
// traceEndSpan ends the span carried by ctx, recording err's message in the
// span status when err is non-nil.
// NOTE(review): only Status.Message is set; Status.Code is left at its zero
// value even on error — confirm whether a non-OK code should be recorded.
// NOTE(review): assumes ctx carries a span (or that *Span methods tolerate a
// nil receiver) — verify against the opencensus trace package in use.
func traceEndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(trace.Status{Message: err.Error()})
	}
	span.End()
}
// tracePrintf annotates the span carried by ctx with the given attribute map
// and a Printf-style message. Map values of type string, bool, int, and int64
// map to their native attribute kinds; anything else is stringified with %#v.
func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
	var attrs []trace.Attribute
	for key, raw := range attrMap {
		var attr trace.Attribute
		switch val := raw.(type) {
		case string:
			attr = trace.StringAttribute(key, val)
		case bool:
			attr = trace.BoolAttribute(key, val)
		case int:
			attr = trace.Int64Attribute(key, int64(val))
		case int64:
			attr = trace.Int64Attribute(key, val)
		default:
			attr = trace.StringAttribute(key, fmt.Sprintf("%#v", val))
		}
		attrs = append(attrs, attr)
	}
	trace.FromContext(ctx).Annotatef(attrs, format, args...)
}

@ -20,8 +20,8 @@ import (
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func TestRandomizedDelays(t *testing.T) {
@ -43,7 +43,7 @@ func TestRandomizedDelays(t *testing.T) {
}
invokeTime = time.Now()
// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
errf := grpc.Errorf
errf := status.Errorf
return errf(codes.Unavailable, "")
}, settings...)
}

36
vendor/cloud.google.com/go/bigtable/not_go18.go generated vendored Normal file

@ -0,0 +1,36 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package bigtable
import (
"golang.org/x/net/context"
"google.golang.org/api/option"
)
// OpenCensus only supports go 1.8 and higher.
// The stubs below are the pre-1.8 no-op counterparts of the go18.go
// tracing helpers; they keep the rest of the package build-tag-free.

// openCensusOptions returns no client options on pre-1.8 Go.
func openCensusOptions() []option.ClientOption { return nil }

// traceStartSpan is a no-op on pre-1.8 Go; it returns ctx unchanged.
func traceStartSpan(ctx context.Context, _ string) context.Context {
	return ctx
}

// traceEndSpan is a no-op on pre-1.8 Go.
func traceEndSpan(context.Context, error) {
}

// tracePrintf is a no-op on pre-1.8 Go.
func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
}

@ -163,7 +163,7 @@ func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row
func (cr *chunkReader) finishCell() {
ri := ReadItem{
Row: string(cr.curKey),
Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
Column: string(cr.curFam) + ":" + string(cr.curQual),
Timestamp: Timestamp(cr.curTS),
Value: cr.curVal,
}

@ -30,6 +30,7 @@ import (
rpcpb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
@ -42,12 +43,12 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err
return nil, nil, err
}
client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock()))
if err != nil {
return nil, nil, err
}
adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock()))
if err != nil {
return nil, nil, err
}
@ -76,7 +77,7 @@ func TestRetryApply(t *testing.T) {
errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
errCount++
return nil, grpc.Errorf(code, "")
return nil, status.Errorf(code, "")
}
return handler(ctx, req)
}
@ -156,7 +157,7 @@ func TestRetryApplyBulk(t *testing.T) {
f = func(ss grpc.ServerStream) error {
if errCount < 3 {
errCount++
return grpc.Errorf(codes.Aborted, "")
return status.Errorf(codes.Aborted, "")
}
return nil
}
@ -182,7 +183,7 @@ func TestRetryApplyBulk(t *testing.T) {
switch errCount {
case 0:
// Retryable request failure
err = grpc.Errorf(codes.Unavailable, "")
err = status.Errorf(codes.Unavailable, "")
case 1:
// Two mutations fail
writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
@ -235,8 +236,8 @@ func TestRetryApplyBulk(t *testing.T) {
t.Errorf("unretryable errors: request failed %v", err)
}
want := []error{
grpc.Errorf(codes.FailedPrecondition, ""),
grpc.Errorf(codes.Aborted, ""),
status.Errorf(codes.FailedPrecondition, ""),
status.Errorf(codes.Aborted, ""),
}
if !testutil.Equal(want, errors) {
t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
@ -323,20 +324,20 @@ func TestRetryReadRows(t *testing.T) {
switch errCount {
case 0:
// Retryable request failure
err = grpc.Errorf(codes.Unavailable, "")
err = status.Errorf(codes.Unavailable, "")
case 1:
// Write two rows then error
if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
}
writeReadRowsResponse(ss, "a", "b")
err = grpc.Errorf(codes.Unavailable, "")
err = status.Errorf(codes.Unavailable, "")
case 2:
// Retryable request failure
if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
t.Errorf("2 range retries: got %q, want %q", got, want)
}
err = grpc.Errorf(codes.Unavailable, "")
err = status.Errorf(codes.Unavailable, "")
case 3:
// Write two more rows
writeReadRowsResponse(ss, "c", "d")

@ -21,6 +21,7 @@ import (
"os"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc"
@ -302,11 +303,14 @@ func (c *Client) Close() error {
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get")
defer func() { trace.EndSpan(ctx, err) }()
if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
return ErrInvalidEntityType
}
err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
if me, ok := err.(MultiError); ok {
return me[0]
}
@ -323,7 +327,10 @@ func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti")
defer func() { trace.EndSpan(ctx, err) }()
return c.get(ctx, keys, dst, nil)
}
@ -452,8 +459,11 @@ func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, erro
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
// TODO(jba): rewrite in terms of Mutate.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
@ -471,7 +481,7 @@ func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]
}
// Copy any newly minted keys into the returned keys.
ret := make([]*Key, len(keys))
ret = make([]*Key, len(keys))
for i, key := range keys {
if key.Incomplete() {
// This key is in the mutation results.
@ -541,8 +551,11 @@ func (c *Client) Delete(ctx context.Context, key *Key) error {
}
// DeleteMulti is a batch version of Delete.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
// TODO(jba): rewrite in terms of Mutate.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := deleteMutations(keys)
if err != nil {
return err
@ -580,7 +593,10 @@ func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) ([]*Key, error) {
func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate")
defer func() { trace.EndSpan(ctx, err) }()
pmuts, err := mutationProtos(muts)
if err != nil {
return nil, err
@ -595,7 +611,7 @@ func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) ([]*Key, error)
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret := make([]*Key, len(muts))
ret = make([]*Key, len(muts))
for i, mut := range muts {
if mut.key.Incomplete() {
// This key is in the mutation results.

@ -158,14 +158,33 @@ Example code:
}
Slice Fields
A field of slice type corresponds to a Datastore array property, except for []byte, which corresponds
to a Datastore blob.
Zero-length slice fields are not saved. Slice fields of length 1 or greater are saved
as Datastore arrays. When a zero-length Datastore array is loaded into a slice field,
the slice field remains unchanged.
If a non-array value is loaded into a slice field, the result will be a slice with
one element, containing the value.
Loading Nulls
Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
Loading a Null into a pointer field results in nil.
Loading a Null into a field of struct type is an error.
Pointer Fields
A struct field can be a pointer to a signed integer, floating-point number, string or bool.
Putting a non-nil pointer will store its dereferenced value. Putting a nil pointer will
store a Datastore NULL, unless the field is marked omitempty, in which case no property
will be stored.
A struct field can be a pointer to a signed integer, floating-point number, string or
bool. Putting a non-nil pointer will store its dereferenced value. Putting a nil
pointer will store a Datastore Null property, unless the field is marked omitempty,
in which case no property will be stored.
Getting a NULL into a pointer field sets the pointer to nil. Getting any other value
Loading a Null into a pointer field sets the pointer to nil. Loading any other value
allocates new storage with the value, and sets the field to point to it.

@ -811,3 +811,76 @@ func TestLoadPointers(t *testing.T) {
}
}
}
// TestLoadNonArrayIntoSlice checks that loading a single (non-array) property
// value into a slice-typed struct field yields a one-element slice holding
// that value.
func TestLoadNonArrayIntoSlice(t *testing.T) {
	// Loading a non-array value into a slice field results in a slice of size 1.
	var got struct{ S []string }
	if err := LoadStruct(&got, []Property{{Name: "S", Value: "x"}}); err != nil {
		t.Fatal(err)
	}
	if want := []string{"x"}; !testutil.Equal(got.S, want) {
		t.Errorf("got %#v, want %#v", got.S, want)
	}
}
// TestLoadEmptyArrayIntoSlice checks that loading a zero-length Datastore
// array into a slice field leaves the field's existing contents unchanged.
func TestLoadEmptyArrayIntoSlice(t *testing.T) {
	// Loading an empty array into a slice field is a no-op.
	// The field is pre-populated so the test can tell "left alone"
	// apart from "cleared".
	var got = struct{ S []string }{[]string{"x"}}
	if err := LoadStruct(&got, []Property{{Name: "S", Value: []interface{}{}}}); err != nil {
		t.Fatal(err)
	}
	if want := []string{"x"}; !testutil.Equal(got.S, want) {
		t.Errorf("got %#v, want %#v", got.S, want)
	}
}
// TestLoadNull verifies the documented behavior of loading Datastore Nulls:
//   - Null into a basic-typed field (int, float, string, bool) -> zero value.
//   - Null into a slice of basic type -> slice of size 1 holding the zero value.
//   - Null into a pointer-to-struct field -> nil pointer.
//   - Null into a struct-typed field -> error.
func TestLoadNull(t *testing.T) {
	// Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
	// Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
	// (As expected from the behavior of slices and nulls with basic types.)
	type S struct {
		I int64
		F float64
		S string
		B bool
		A []string
	}
	// Start from non-zero values so the test can observe the reset.
	got := S{
		I: 1,
		F: 1.0,
		S: "1",
		B: true,
		A: []string{"X"},
	}
	want := S{A: []string{""}}
	// A Property with no Value set represents a Null.
	props := []Property{{Name: "I"}, {Name: "F"}, {Name: "S"}, {Name: "B"}, {Name: "A"}}
	if err := LoadStruct(&got, props); err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	// Loading a Null into a pointer to struct field results in a nil field.
	got2 := struct{ X *S }{X: &S{}}
	if err := LoadStruct(&got2, []Property{{Name: "X"}}); err != nil {
		t.Fatal(err)
	}
	if got2.X != nil {
		t.Errorf("got %v, want nil", got2.X)
	}

	// Loading a Null into a struct field is an error.
	got3 := struct{ X S }{}
	err := LoadStruct(&got3, []Property{{Name: "X"}})
	if err == nil {
		t.Error("got nil, want error")
	}
}
// var got2 struct{ S []Pet }
// if err := LoadStruct(&got2, []Property{{Name: "S", Value: nil}}); err != nil {
// t.Fatal(err)
// }
// }

45
vendor/cloud.google.com/go/datastore/oc_test.go generated vendored Normal file

@ -0,0 +1,45 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package datastore
import (
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
)
// TestOCTracing checks that a datastore Put performed with a test OpenCensus
// exporter registered produces at least one trace span.
func TestOCTracing(t *testing.T) {
	ctx := context.Background()
	client := newTestClient(ctx, t)
	defer client.Close()

	// Register a capturing exporter for the duration of the test.
	te := testutil.NewTestExporter()
	defer te.Unregister()

	type SomeValue struct {
		S string
	}
	_, err := client.Put(ctx, IncompleteKey("SomeKey", nil), &SomeValue{"foo"})
	if err != nil {
		t.Fatalf("client.Put: %v", err)
	}
	if len(te.Spans) == 0 {
		t.Fatalf("Expected some span to be created, but got %d", 0)
	}
}

@ -23,6 +23,7 @@ import (
"strconv"
"strings"
"cloud.google.com/go/internal/trace"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
@ -445,7 +446,10 @@ func (q *Query) toProto(req *pb.RunQueryRequest) error {
// with the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
func (c *Client) Count(ctx context.Context, q *Query) (n int, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Count")
defer func() { trace.EndSpan(ctx, err) }()
// Check that the query is well-formed.
if q.err != nil {
return 0, q.err
@ -459,7 +463,6 @@ func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
// Create an iterator and use it to walk through the batches of results
// directly.
it := c.Run(ctx, newQ)
n := 0
for {
err := it.nextBatch()
if err == iterator.Done {
@ -492,7 +495,10 @@ func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) (keys []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.GetAll")
defer func() { trace.EndSpan(ctx, err) }()
var (
dv reflect.Value
mat multiArgType
@ -511,7 +517,6 @@ func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key,
}
}
var keys []*Key
for t := c.Run(ctx, q); ; {
k, e, err := t.next()
if err == iterator.Done {
@ -575,6 +580,9 @@ func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
ProjectId: c.dataset,
},
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Run")
defer func() { trace.EndSpan(ctx, t.err) }()
if q.namespace != "" {
t.req.PartitionId = &pb.PartitionId{
NamespaceId: q.namespace,
@ -622,7 +630,7 @@ type Iterator struct {
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
func (t *Iterator) Next(dst interface{}) (k *Key, err error) {
k, e, err := t.next()
if err != nil {
return nil, err
@ -725,7 +733,10 @@ func (t *Iterator) nextBatch() error {
}
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
func (t *Iterator) Cursor() (c Cursor, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Query.Cursor")
defer func() { trace.EndSpan(t.ctx, err) }()
// If there is still an offset, we need to the skip those results first.
for t.err == nil && t.offset > 0 {
t.err = t.nextBatch()

@ -270,3 +270,16 @@ func TestSavePointers(t *testing.T) {
}
}
}
// TestSaveEmptySlice checks that both a nil slice and an empty non-nil slice
// field produce no saved properties at all.
func TestSaveEmptySlice(t *testing.T) {
	// Zero-length slice fields are not saved.
	for _, slice := range [][]string{nil, {}} {
		got, err := SaveStruct(&struct{ S []string }{S: slice})
		if err != nil {
			t.Fatal(err)
		}
		if len(got) != 0 {
			t.Errorf("%#v: got %d properties, wanted zero", slice, len(got))
		}
	}
}

@ -17,6 +17,7 @@ package datastore
import (
"errors"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -95,7 +96,10 @@ type Transaction struct {
}
// NewTransaction starts a new transaction.
func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {
func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (t *Transaction, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.NewTransaction")
defer func() { trace.EndSpan(ctx, err) }()
for _, o := range opts {
if _, ok := o.(maxAttempts); ok {
return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
@ -152,7 +156,10 @@ func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*T
// is, it should have the same result when called multiple times. Note that
// Transaction.Get will append when unmarshalling slice fields, so it is not
// necessarily idempotent.
func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) {
func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (cmt *Commit, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.RunInTransaction")
defer func() { trace.EndSpan(ctx, err) }()
settings := newTransactionSettings(opts)
for n := 0; n < settings.attempts; n++ {
tx, err := c.newTransaction(ctx, settings)
@ -176,7 +183,10 @@ func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) e
}
// Commit applies the enqueued operations atomically.
func (t *Transaction) Commit() (*Commit, error) {
func (t *Transaction) Commit() (c *Commit, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Commit")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return nil, errExpiredTransaction
}
@ -196,7 +206,6 @@ func (t *Transaction) Commit() (*Commit, error) {
}
// Copy any newly minted keys into the returned keys.
commit := &Commit{}
for i, p := range t.pending {
if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
@ -206,20 +215,23 @@ func (t *Transaction) Commit() (*Commit, error) {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
p.key = key
p.commit = commit
p.commit = c
}
return commit, nil
return c, nil
}
// Rollback abandons a pending transaction.
func (t *Transaction) Rollback() error {
func (t *Transaction) Rollback() (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Rollback")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}
id := t.id
t.id = nil
_, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
_, err = t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
ProjectId: t.client.dataset,
Transaction: id,
})
@ -231,11 +243,14 @@ func (t *Transaction) Rollback() error {
// snapshot. Furthermore, if the transaction is set to a serializable isolation
// level, another transaction cannot concurrently modify the data that is read
// or modified by this transaction.
func (t *Transaction) Get(key *Key, dst interface{}) error {
func (t *Transaction) Get(key *Key, dst interface{}) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Get")
defer func() { trace.EndSpan(t.ctx, err) }()
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
err = t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
if me, ok := err.(MultiError); ok {
return me[0]
}
@ -243,7 +258,10 @@ func (t *Transaction) Get(key *Key, dst interface{}) error {
}
// GetMulti is a batch version of Get.
func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
func (t *Transaction) GetMulti(keys []*Key, dst interface{}) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.GetMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}
@ -272,8 +290,11 @@ func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
// PutMulti is a batch version of Put. One PendingKey is returned for each
// element of src in the same order.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
// TODO(jba): rewrite in terms of Mutate.
// TODO(jba): rewrite in terms of Mutate.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) (ret []*PendingKey, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.PutMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return nil, errExpiredTransaction
}
@ -285,7 +306,7 @@ func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, err
t.mutations = append(t.mutations, mutations...)
// Prepare the returned handles, pre-populating where possible.
ret := make([]*PendingKey, len(keys))
ret = make([]*PendingKey, len(keys))
for i, key := range keys {
p := &PendingKey{}
if key.Incomplete() {
@ -312,8 +333,11 @@ func (t *Transaction) Delete(key *Key) error {
}
// DeleteMulti is a batch version of Delete.
func (t *Transaction) DeleteMulti(keys []*Key) error {
// TODO(jba): rewrite in terms of Mutate.
// TODO(jba): rewrite in terms of Mutate.
func (t *Transaction) DeleteMulti(keys []*Key) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.DeleteMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}

812
vendor/cloud.google.com/go/dlp/apiv2/dlp_client.go generated vendored Normal file

@ -0,0 +1,812 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// CallOptions contains the retry settings for each method of Client.
// Each field holds the gax.CallOption slice applied to the same-named RPC
// wrapper below; a nil slice means the method is never retried automatically.
type CallOptions struct {
	InspectContent           []gax.CallOption
	RedactImage              []gax.CallOption
	DeidentifyContent        []gax.CallOption
	ReidentifyContent        []gax.CallOption
	ListInfoTypes            []gax.CallOption
	CreateInspectTemplate    []gax.CallOption
	UpdateInspectTemplate    []gax.CallOption
	GetInspectTemplate       []gax.CallOption
	ListInspectTemplates     []gax.CallOption
	DeleteInspectTemplate    []gax.CallOption
	CreateDeidentifyTemplate []gax.CallOption
	UpdateDeidentifyTemplate []gax.CallOption
	GetDeidentifyTemplate    []gax.CallOption
	ListDeidentifyTemplates  []gax.CallOption
	DeleteDeidentifyTemplate []gax.CallOption
	CreateDlpJob             []gax.CallOption
	ListDlpJobs              []gax.CallOption
	GetDlpJob                []gax.CallOption
	DeleteDlpJob             []gax.CallOption
	CancelDlpJob             []gax.CallOption
	ListJobTriggers          []gax.CallOption
	GetJobTrigger            []gax.CallOption
	DeleteJobTrigger         []gax.CallOption
	UpdateJobTrigger         []gax.CallOption
	CreateJobTrigger         []gax.CallOption
}
// defaultClientOptions returns the dial options used when none are supplied:
// the production DLP endpoint plus the service's default OAuth scopes.
func defaultClientOptions() []option.ClientOption {
	opts := make([]option.ClientOption, 0, 2)
	opts = append(opts, option.WithEndpoint("dlp.googleapis.com:443"))
	opts = append(opts, option.WithScopes(DefaultAuthScopes()...))
	return opts
}
// defaultCallOptions builds the per-method retry configuration for Client.
// Idempotent RPCs retry on DeadlineExceeded/Unavailable with exponential
// backoff; non-idempotent RPCs get no retry options (nil slice).
func defaultCallOptions() *CallOptions {
	idempotent := []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{
				codes.DeadlineExceeded,
				codes.Unavailable,
			}, gax.Backoff{
				Initial:    100 * time.Millisecond,
				Max:        60000 * time.Millisecond,
				Multiplier: 1.3,
			})
		}),
	}
	// NOTE: the generated original looked up a "non_idempotent" key that was
	// never inserted in its retry map, yielding nil — reproduced here.
	var nonIdempotent []gax.CallOption
	return &CallOptions{
		InspectContent:           idempotent,
		RedactImage:              idempotent,
		DeidentifyContent:        idempotent,
		ReidentifyContent:        idempotent,
		ListInfoTypes:            idempotent,
		CreateInspectTemplate:    nonIdempotent,
		UpdateInspectTemplate:    nonIdempotent,
		GetInspectTemplate:       idempotent,
		ListInspectTemplates:     idempotent,
		DeleteInspectTemplate:    idempotent,
		CreateDeidentifyTemplate: nonIdempotent,
		UpdateDeidentifyTemplate: nonIdempotent,
		GetDeidentifyTemplate:    idempotent,
		ListDeidentifyTemplates:  idempotent,
		DeleteDeidentifyTemplate: idempotent,
		CreateDlpJob:             nonIdempotent,
		ListDlpJobs:              idempotent,
		GetDlpJob:                idempotent,
		DeleteDlpJob:             idempotent,
		CancelDlpJob:             nonIdempotent,
		ListJobTriggers:          idempotent,
		GetJobTrigger:            idempotent,
		DeleteJobTrigger:         idempotent,
		UpdateJobTrigger:         nonIdempotent,
		CreateJobTrigger:         nonIdempotent,
	}
}
// Client is a client for interacting with Cloud Data Loss Prevention (DLP) API.
// Construct one with NewClient; it is not usable as its zero value.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client dlppb.DlpServiceClient

	// The call options for this service. Mutable by the caller; each RPC
	// wrapper copies the relevant slice before appending per-call options.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewClient creates a new dlp service client.
//
// The Cloud Data Loss Prevention (DLP) API is a service that allows clients
// to detect the presence of Personally Identifiable Information (PII) and other
// privacy-sensitive data in user-supplied, unstructured data streams, like text
// blocks or images.
// The service also includes methods for sensitive data redaction and
// scheduling of data scans on Google Cloud Platform based data sets.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	// Dial with the default endpoint/scopes first; caller-supplied opts come
	// after and therefore take precedence.
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: dlppb.NewDlpServiceClient(conn),
	}
	// Populate the x-goog-api-client header sent with every request.
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The returned connection is shared with the client; do not close it directly.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. The client is unusable afterwards.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	// Build key/value pairs: Go version, caller-supplied pairs, then the
	// client-library component versions.
	kv := []string{"gl-go", version.Go()}
	kv = append(kv, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// InspectContent finds potentially sensitive info in content.
// This method has limits on input size, processing time, and output size.
// How-to guide for text (at /dlp/docs/inspecting-text), How-to guide for
// images (at /dlp/docs/inspecting-images)
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
	var resp *dlppb.InspectContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// RedactImage redacts potentially sensitive info from an image.
// This method has limits on input size, processing time, and output size.
// How-to guide (at /dlp/docs/redacting-sensitive-data-images)
func (c *Client) RedactImage(ctx context.Context, req *dlppb.RedactImageRequest, opts ...gax.CallOption) (*dlppb.RedactImageResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.RedactImage[0:len(c.CallOptions.RedactImage):len(c.CallOptions.RedactImage)], opts...)
	var resp *dlppb.RedactImageResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RedactImage(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeidentifyContent de-identifies potentially sensitive info from a ContentItem.
// This method has limits on input size and output size.
// How-to guide (at /dlp/docs/deidentify-sensitive-data)
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
	var resp *dlppb.DeidentifyContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ReidentifyContent re-identifies content that has been de-identified.
func (c *Client) ReidentifyContent(ctx context.Context, req *dlppb.ReidentifyContentRequest, opts ...gax.CallOption) (*dlppb.ReidentifyContentResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ReidentifyContent[0:len(c.CallOptions.ReidentifyContent):len(c.CallOptions.ReidentifyContent)], opts...)
	var resp *dlppb.ReidentifyContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ReidentifyContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListInfoTypes returns a list of the sensitive information types that the DLP API
// supports. For more information, see Listing supported predefined
// infoTypes (at /dlp/docs/listing-infotypes).
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
	var resp *dlppb.ListInfoTypesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateInspectTemplate creates an inspect template for re-using frequently used configuration
// for inspecting content, images, and storage.
func (c *Client) CreateInspectTemplate(ctx context.Context, req *dlppb.CreateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CreateInspectTemplate[0:len(c.CallOptions.CreateInspectTemplate):len(c.CallOptions.CreateInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateInspectTemplate updates the inspect template.
func (c *Client) UpdateInspectTemplate(ctx context.Context, req *dlppb.UpdateInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.UpdateInspectTemplate[0:len(c.CallOptions.UpdateInspectTemplate):len(c.CallOptions.UpdateInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetInspectTemplate gets an inspect template.
func (c *Client) GetInspectTemplate(ctx context.Context, req *dlppb.GetInspectTemplateRequest, opts ...gax.CallOption) (*dlppb.InspectTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetInspectTemplate[0:len(c.CallOptions.GetInspectTemplate):len(c.CallOptions.GetInspectTemplate)], opts...)
	var resp *dlppb.InspectTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListInspectTemplates lists inspect templates.
// The returned iterator fetches pages lazily as Next is called.
func (c *Client) ListInspectTemplates(ctx context.Context, req *dlppb.ListInspectTemplatesRequest, opts ...gax.CallOption) *InspectTemplateIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListInspectTemplates[0:len(c.CallOptions.ListInspectTemplates):len(c.CallOptions.ListInspectTemplates)], opts...)
	it := &InspectTemplateIterator{}
	// InternalFetch retrieves a single page; it is driven by the pageInfo
	// machinery wired up below.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.InspectTemplate, string, error) {
		var resp *dlppb.ListInspectTemplatesResponse
		req.PageToken = pageToken
		// Clamp: the proto PageSize field is an int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListInspectTemplates(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.InspectTemplates, resp.NextPageToken, nil
	}
	// fetch appends one page of results to the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// DeleteInspectTemplate deletes an inspect template.
func (c *Client) DeleteInspectTemplate(ctx context.Context, req *dlppb.DeleteInspectTemplateRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteInspectTemplate[0:len(c.CallOptions.DeleteInspectTemplate):len(c.CallOptions.DeleteInspectTemplate)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteInspectTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// CreateDeidentifyTemplate creates a de-identify template for re-using frequently used configuration
// for Deidentifying content, images, and storage.
func (c *Client) CreateDeidentifyTemplate(ctx context.Context, req *dlppb.CreateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CreateDeidentifyTemplate[0:len(c.CallOptions.CreateDeidentifyTemplate):len(c.CallOptions.CreateDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateDeidentifyTemplate updates the de-identify template.
func (c *Client) UpdateDeidentifyTemplate(ctx context.Context, req *dlppb.UpdateDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.UpdateDeidentifyTemplate[0:len(c.CallOptions.UpdateDeidentifyTemplate):len(c.CallOptions.UpdateDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetDeidentifyTemplate gets a de-identify template.
func (c *Client) GetDeidentifyTemplate(ctx context.Context, req *dlppb.GetDeidentifyTemplateRequest, opts ...gax.CallOption) (*dlppb.DeidentifyTemplate, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetDeidentifyTemplate[0:len(c.CallOptions.GetDeidentifyTemplate):len(c.CallOptions.GetDeidentifyTemplate)], opts...)
	var resp *dlppb.DeidentifyTemplate
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListDeidentifyTemplates lists de-identify templates.
// The returned iterator fetches pages lazily as Next is called.
func (c *Client) ListDeidentifyTemplates(ctx context.Context, req *dlppb.ListDeidentifyTemplatesRequest, opts ...gax.CallOption) *DeidentifyTemplateIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListDeidentifyTemplates[0:len(c.CallOptions.ListDeidentifyTemplates):len(c.CallOptions.ListDeidentifyTemplates)], opts...)
	it := &DeidentifyTemplateIterator{}
	// InternalFetch retrieves a single page; it is driven by the pageInfo
	// machinery wired up below.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DeidentifyTemplate, string, error) {
		var resp *dlppb.ListDeidentifyTemplatesResponse
		req.PageToken = pageToken
		// Clamp: the proto PageSize field is an int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDeidentifyTemplates(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DeidentifyTemplates, resp.NextPageToken, nil
	}
	// fetch appends one page of results to the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// DeleteDeidentifyTemplate deletes a de-identify template.
func (c *Client) DeleteDeidentifyTemplate(ctx context.Context, req *dlppb.DeleteDeidentifyTemplateRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteDeidentifyTemplate[0:len(c.CallOptions.DeleteDeidentifyTemplate):len(c.CallOptions.DeleteDeidentifyTemplate)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteDeidentifyTemplate(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// CreateDlpJob creates a new job to inspect storage or calculate risk metrics How-to
// guide (at /dlp/docs/compute-risk-analysis).
func (c *Client) CreateDlpJob(ctx context.Context, req *dlppb.CreateDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CreateDlpJob[0:len(c.CallOptions.CreateDlpJob):len(c.CallOptions.CreateDlpJob)], opts...)
	var resp *dlppb.DlpJob
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListDlpJobs lists DlpJobs that match the specified filter in the request.
// The returned iterator fetches pages lazily as Next is called.
func (c *Client) ListDlpJobs(ctx context.Context, req *dlppb.ListDlpJobsRequest, opts ...gax.CallOption) *DlpJobIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListDlpJobs[0:len(c.CallOptions.ListDlpJobs):len(c.CallOptions.ListDlpJobs)], opts...)
	it := &DlpJobIterator{}
	// InternalFetch retrieves a single page; it is driven by the pageInfo
	// machinery wired up below.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.DlpJob, string, error) {
		var resp *dlppb.ListDlpJobsResponse
		req.PageToken = pageToken
		// Clamp: the proto PageSize field is an int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDlpJobs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Jobs, resp.NextPageToken, nil
	}
	// fetch appends one page of results to the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetDlpJob gets the latest state of a long-running DlpJob.
func (c *Client) GetDlpJob(ctx context.Context, req *dlppb.GetDlpJobRequest, opts ...gax.CallOption) (*dlppb.DlpJob, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetDlpJob[0:len(c.CallOptions.GetDlpJob):len(c.CallOptions.GetDlpJob)], opts...)
	var resp *dlppb.DlpJob
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteDlpJob deletes a long-running DlpJob. This method indicates that the client is
// no longer interested in the DlpJob result. The job will be cancelled if
// possible.
func (c *Client) DeleteDlpJob(ctx context.Context, req *dlppb.DeleteDlpJobRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteDlpJob[0:len(c.CallOptions.DeleteDlpJob):len(c.CallOptions.DeleteDlpJob)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// CancelDlpJob starts asynchronous cancellation on a long-running DlpJob. The server
// makes a best effort to cancel the DlpJob, but success is not
// guaranteed.
func (c *Client) CancelDlpJob(ctx context.Context, req *dlppb.CancelDlpJobRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CancelDlpJob[0:len(c.CallOptions.CancelDlpJob):len(c.CallOptions.CancelDlpJob)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.CancelDlpJob(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// ListJobTriggers lists job triggers.
// The returned iterator fetches pages lazily as Next is called.
func (c *Client) ListJobTriggers(ctx context.Context, req *dlppb.ListJobTriggersRequest, opts ...gax.CallOption) *JobTriggerIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.ListJobTriggers[0:len(c.CallOptions.ListJobTriggers):len(c.CallOptions.ListJobTriggers)], opts...)
	it := &JobTriggerIterator{}
	// InternalFetch retrieves a single page; it is driven by the pageInfo
	// machinery wired up below.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*dlppb.JobTrigger, string, error) {
		var resp *dlppb.ListJobTriggersResponse
		req.PageToken = pageToken
		// Clamp: the proto PageSize field is an int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListJobTriggers(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.JobTriggers, resp.NextPageToken, nil
	}
	// fetch appends one page of results to the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetJobTrigger gets a job trigger.
func (c *Client) GetJobTrigger(ctx context.Context, req *dlppb.GetJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.GetJobTrigger[0:len(c.CallOptions.GetJobTrigger):len(c.CallOptions.GetJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteJobTrigger deletes a job trigger.
func (c *Client) DeleteJobTrigger(ctx context.Context, req *dlppb.DeleteJobTriggerRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.DeleteJobTrigger[0:len(c.CallOptions.DeleteJobTrigger):len(c.CallOptions.DeleteJobTrigger)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// UpdateJobTrigger updates a job trigger.
func (c *Client) UpdateJobTrigger(ctx context.Context, req *dlppb.UpdateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.UpdateJobTrigger[0:len(c.CallOptions.UpdateJobTrigger):len(c.CallOptions.UpdateJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateJobTrigger creates a job trigger to run DLP actions such as scanning storage for
// sensitive information on a set schedule.
func (c *Client) CreateJobTrigger(ctx context.Context, req *dlppb.CreateJobTriggerRequest, opts ...gax.CallOption) (*dlppb.JobTrigger, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so the append copies rather than
	// mutating the shared default CallOptions slice.
	opts = append(c.CallOptions.CreateJobTrigger[0:len(c.CallOptions.CreateJobTrigger):len(c.CallOptions.CreateJobTrigger)], opts...)
	var resp *dlppb.JobTrigger
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateJobTrigger(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeidentifyTemplateIterator manages a stream of *dlppb.DeidentifyTemplate.
type DeidentifyTemplateIterator struct {
	items    []*dlppb.DeidentifyTemplate // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items from the API; returns iterator.Done when exhausted

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DeidentifyTemplate, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DeidentifyTemplateIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DeidentifyTemplateIterator) Next() (*dlppb.DeidentifyTemplate, error) {
	var item *dlppb.DeidentifyTemplate
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the head of the buffered page.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items remain buffered (used by PageInfo).
func (it *DeidentifyTemplateIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the pagination machinery and clears them.
func (it *DeidentifyTemplateIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// DlpJobIterator manages a stream of *dlppb.DlpJob.
type DlpJobIterator struct {
	items    []*dlppb.DlpJob // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items from the API; returns iterator.Done when exhausted

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.DlpJob, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DlpJobIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DlpJobIterator) Next() (*dlppb.DlpJob, error) {
	var item *dlppb.DlpJob
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the head of the buffered page.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items remain buffered (used by PageInfo).
func (it *DlpJobIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the pagination machinery and clears them.
func (it *DlpJobIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// InspectTemplateIterator manages a stream of *dlppb.InspectTemplate.
type InspectTemplateIterator struct {
	items    []*dlppb.InspectTemplate // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items from the API; returns iterator.Done when exhausted

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.InspectTemplate, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *InspectTemplateIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *InspectTemplateIterator) Next() (*dlppb.InspectTemplate, error) {
	var item *dlppb.InspectTemplate
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the head of the buffered page.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items remain buffered (used by PageInfo).
func (it *InspectTemplateIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered items to the pagination machinery and clears them.
func (it *InspectTemplateIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// JobTriggerIterator manages a stream of *dlppb.JobTrigger.
type JobTriggerIterator struct {
	// items buffers results that have been fetched but not yet returned by Next.
	items []*dlppb.JobTrigger
	// pageInfo tracks page-size and page-token state for the iterator package.
	pageInfo *iterator.PageInfo
	// nextFunc fetches the next batch of results into items; per Next's
	// contract it returns iterator.Done when no results remain.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*dlppb.JobTrigger, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *JobTriggerIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *JobTriggerIterator) Next() (*dlppb.JobTrigger, error) {
	var item *dlppb.JobTrigger
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the first buffered item. NOTE(review): assumes nextFunc leaves
	// items non-empty whenever it returns nil — confirm against the
	// generated fetch logic, which is outside this view.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched items remain buffered.
func (it *JobTriggerIterator) bufLen() int {
	return len(it.items)
}

// takeBuf removes and returns the buffered items, emptying the buffer.
func (it *JobTriggerIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

@ -0,0 +1,498 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp_test
import (
"cloud.google.com/go/dlp/apiv2"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2"
)
// ExampleNewClient shows the minimal setup for the DLP API client:
// construct it from a context and handle the returned error.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_InspectContent: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_InspectContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.InspectContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.InspectContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_RedactImage: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_RedactImage() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.RedactImageRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RedactImage(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeidentifyContent: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_DeidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.DeidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ReidentifyContent: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_ReidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ReidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ReidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListInfoTypes: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_ListInfoTypes() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ListInfoTypesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInfoTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CreateInspectTemplate: build the request, issue the unary
// RPC, then consume the response.
func ExampleClient_CreateInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.CreateInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_UpdateInspectTemplate: build the request, issue the unary
// RPC, then consume the response.
func ExampleClient_UpdateInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.UpdateInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_GetInspectTemplate: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_GetInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.GetInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListInspectTemplates: page through results with the
// returned iterator; iterator.Done terminates the loop.
func ExampleClient_ListInspectTemplates() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ListInspectTemplatesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListInspectTemplates(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_DeleteInspectTemplate: build the request, issue the RPC,
// and check the error (no response body).
func ExampleClient_DeleteInspectTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.DeleteInspectTemplateRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteInspectTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_CreateDeidentifyTemplate: build the request, issue the
// unary RPC, then consume the response.
func ExampleClient_CreateDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.CreateDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_UpdateDeidentifyTemplate: build the request, issue the
// unary RPC, then consume the response.
func ExampleClient_UpdateDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.UpdateDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_GetDeidentifyTemplate: build the request, issue the unary
// RPC, then consume the response.
func ExampleClient_GetDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.GetDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListDeidentifyTemplates: page through results with the
// returned iterator; iterator.Done terminates the loop.
func ExampleClient_ListDeidentifyTemplates() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ListDeidentifyTemplatesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDeidentifyTemplates(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_DeleteDeidentifyTemplate: build the request, issue the RPC,
// and check the error (no response body).
func ExampleClient_DeleteDeidentifyTemplate() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.DeleteDeidentifyTemplateRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteDeidentifyTemplate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_CreateDlpJob: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_CreateDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.CreateDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_ListDlpJobs: page through results with the returned
// iterator; iterator.Done terminates the loop.
func ExampleClient_ListDlpJobs() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ListDlpJobsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDlpJobs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_GetDlpJob: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_GetDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.GetDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteDlpJob: build the request, issue the RPC, and check
// the error (no response body).
func ExampleClient_DeleteDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.DeleteDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_CancelDlpJob: build the request, issue the RPC, and check
// the error (no response body).
func ExampleClient_CancelDlpJob() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.CancelDlpJobRequest{
		// TODO: Fill request struct fields.
	}
	err = c.CancelDlpJob(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_ListJobTriggers: page through results with the returned
// iterator; iterator.Done terminates the loop.
func ExampleClient_ListJobTriggers() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.ListJobTriggersRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListJobTriggers(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

// ExampleClient_GetJobTrigger: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_GetJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.GetJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_DeleteJobTrigger: build the request, issue the RPC, and
// check the error (no response body).
func ExampleClient_DeleteJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.DeleteJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

// ExampleClient_UpdateJobTrigger: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_UpdateJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.UpdateJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

// ExampleClient_CreateJobTrigger: build the request, issue the unary RPC,
// then consume the response.
func ExampleClient_CreateJobTrigger() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &dlppb.CreateJobTriggerRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateJobTrigger(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

47
vendor/cloud.google.com/go/dlp/apiv2/doc.go generated vendored Normal file

@ -0,0 +1,47 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package dlp is an auto-generated package for the
// Cloud Data Loss Prevention (DLP) API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Provides methods for detection of privacy-sensitive fragments in text,
// images, and Google Cloud Platform storage repositories.
package dlp // import "cloud.google.com/go/dlp/apiv2"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertMetadata returns a copy of ctx whose outgoing gRPC metadata is the
// union of the metadata already attached to ctx and every MD in mds. Values
// for a key that appears more than once are concatenated in argument order.
// The input context and its metadata are left unmodified.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	existing, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating so the caller's context is unaffected.
	merged := existing.Copy()
	for _, extra := range mds {
		for key, vals := range extra {
			merged[key] = append(merged[key], vals...)
		}
	}
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 1)
	scopes = append(scopes, "https://www.googleapis.com/auth/cloud-platform")
	return scopes
}

1902
vendor/cloud.google.com/go/dlp/apiv2/mock_test.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

@ -69,13 +69,13 @@ func TestReportErrorsServiceSmoke(t *testing.T) {
LineNumber: lineNumber,
FunctionName: functionName,
}
var context = &clouderrorreportingpb.ErrorContext{
var context_ = &clouderrorreportingpb.ErrorContext{
ReportLocation: reportLocation,
}
var event = &clouderrorreportingpb.ReportedErrorEvent{
Message: message,
ServiceContext: serviceContext,
Context: context,
Context: context_,
}
var request = &clouderrorreportingpb.ReportErrorEventRequest{
ProjectName: formattedProjectName,

@ -29,9 +29,12 @@ import (
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// resourcePrefixHeader is the name of the metadata header used to indicate
@ -123,7 +126,7 @@ func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *Documen
// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are
// returned in the order of the given DocumentRefs.
//
// If a document is not present, the corresponding DocumentSnapshot will be nil.
// If a document is not present, the corresponding DocumentSnapshot's Exists method will return false.
func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*DocumentSnapshot, error) {
if err := checkTransaction(ctx); err != nil {
return nil, err
@ -133,11 +136,13 @@ func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*Documen
func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte) ([]*DocumentSnapshot, error) {
var docNames []string
for _, dr := range docRefs {
docIndex := map[string]int{} // doc name to position in docRefs
for i, dr := range docRefs {
if dr == nil {
return nil, errNilDocRef
}
docNames = append(docNames, dr.Path)
docIndex[dr.Path] = i
}
req := &pb.BatchGetDocumentsRequest{
Database: c.path(),
@ -151,44 +156,43 @@ func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte)
return nil, err
}
// Read results from the stream and add them to a map.
docMap := map[string]*pb.Document{}
// Read and remember all results from the stream.
var resps []*pb.BatchGetDocumentsResponse
for {
res, err := streamClient.Recv()
resp, err := streamClient.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
switch x := res.Result.(type) {
case *pb.BatchGetDocumentsResponse_Found:
docMap[x.Found.Name] = x.Found
resps = append(resps, resp)
}
// Results may arrive out of order. Put each at the right index.
docs := make([]*DocumentSnapshot, len(docNames))
for _, resp := range resps {
var (
i int
doc *pb.Document
err error
)
switch r := resp.Result.(type) {
case *pb.BatchGetDocumentsResponse_Found:
i = docIndex[r.Found.Name]
doc = r.Found
case *pb.BatchGetDocumentsResponse_Missing:
if docMap[x.Missing] != nil {
return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing)
}
docMap[x.Missing] = nil
i = docIndex[r.Missing]
doc = nil
default:
return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
}
}
// Put the documents we've gathered in the same order as the requesting slice of
// DocumentRefs.
docs := make([]*DocumentSnapshot, len(docNames))
for i, name := range docNames {
pbDoc, ok := docMap[name]
if !ok {
return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name)
if docs[i] != nil {
return nil, fmt.Errorf("firestore: %q seen twice", docRefs[i].Path)
}
if pbDoc != nil {
doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c)
if err != nil {
return nil, err
}
docs[i] = doc
docs[i], err = newDocumentSnapshot(docRefs[i], doc, c, resp.ReadTime)
if err != nil {
return nil, err
}
}
return docs, nil
@ -266,3 +270,14 @@ func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) {
}
return &WriteResult{UpdateTime: t}, nil
}
func sleep(ctx context.Context, dur time.Duration) error {
switch err := gax.Sleep(ctx, dur); err {
case context.Canceled:
return status.Error(codes.Canceled, "context canceled")
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, "context deadline exceeded")
default:
return err
}
}

@ -17,10 +17,11 @@ package firestore
import (
"testing"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var testClient = &Client{
@ -124,17 +125,21 @@ func testGetAll(t *testing.T, c *Client, srv *mockServer, dbPath string, getAll
Fields: map[string]*pb.Value{"f": intval(1)},
},
}
wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2, aTimestamp3}
srv.addRPC(req,
[]interface{}{
// deliberately put these out of order
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]},
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[2]},
ReadTime: aTimestamp3,
},
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]},
Result: &pb.BatchGetDocumentsResponse_Found{wantPBDocs[0]},
ReadTime: aTimestamp,
},
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"},
Result: &pb.BatchGetDocumentsResponse_Missing{dbPath + "/documents/C/b"},
ReadTime: aTimestamp2,
},
},
)
@ -151,12 +156,9 @@ func testGetAll(t *testing.T, c *Client, srv *mockServer, dbPath string, getAll
t.Errorf("got %d docs, wanted %d", got, want)
}
for i, got := range docs {
var want *DocumentSnapshot
if wantPBDocs[i] != nil {
want, err = newDocumentSnapshot(docRefs[i], wantPBDocs[i], c)
if err != nil {
t.Fatal(err)
}
want, err := newDocumentSnapshot(docRefs[i], wantPBDocs[i], c, wantReadTimes[i])
if err != nil {
t.Fatal(err)
}
if diff := testDiff(got, want); diff != "" {
t.Errorf("#%d: got=--, want==++\n%s", i, diff)
@ -181,7 +183,7 @@ func TestGetAllErrors(t *testing.T) {
Database: dbPath,
Documents: []string{docPath},
},
[]interface{}{grpc.Errorf(codes.Internal, "")},
[]interface{}{status.Errorf(codes.Internal, "")},
)
_, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")})
codeEq(t, "GetAll #1", codes.Internal, err)
@ -195,27 +197,16 @@ func TestGetAllErrors(t *testing.T) {
},
[]interface{}{
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}},
Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{Name: docPath}},
ReadTime: aTimestamp,
},
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Missing{docPath},
Result: &pb.BatchGetDocumentsResponse_Missing{docPath},
ReadTime: aTimestamp,
},
},
)
if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil {
t.Error("got nil, want error")
}
// Doc never appears (server bug).
srv.reset()
srv.addRPC(
&pb.BatchGetDocumentsRequest{
Database: dbPath,
Documents: []string{docPath},
},
[]interface{}{},
)
if _, err := c.GetAll(ctx, []*DocumentRef{c.Doc("C/a")}); err == nil {
t.Error("got nil, want error")
}
}

@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"math"
"path"
"path/filepath"
"strings"
@ -77,12 +78,21 @@ func runTest(t *testing.T, msg string, test *pb.Test) {
ctx := context.Background()
c, srv := newMock(t)
switch tt := test.Test.(type) {
case *pb.Test_Get:
srv.addRPC(tt.Get.Request, &fspb.Document{
CreateTime: &ts.Timestamp{},
UpdateTime: &ts.Timestamp{},
req := &fspb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: []string{tt.Get.DocRefPath},
}
srv.addRPC(req, []interface{}{
&fspb.BatchGetDocumentsResponse{
Result: &fspb.BatchGetDocumentsResponse_Found{&fspb.Document{
Name: tt.Get.DocRefPath,
CreateTime: &ts.Timestamp{},
UpdateTime: &ts.Timestamp{},
}},
ReadTime: &ts.Timestamp{},
},
})
ref := docRefFromPath(tt.Get.DocRefPath, c)
_, err := ref.Get(ctx)
@ -202,6 +212,8 @@ func convertTestValue(v interface{}) interface{} {
return ServerTimestamp
case "Delete":
return Delete
case "NaN":
return math.NaN()
default:
return v
}

@ -21,6 +21,9 @@ database.
See https://cloud.google.com/firestore/docs for an introduction
to Cloud Firestore and additional help on using the Firestore API.
Note: you can't use both Cloud Firestore and Cloud Datastore in the same
project.
Creating a Client
To start working with this package, create a client with a project ID:
@ -167,6 +170,7 @@ Call the Query's Documents method to get an iterator, and use it like
the other Google Cloud Client iterators.
iter := q.Documents(ctx)
defer iter.Stop()
for {
doc, err := iter.Next()
if err == iterator.Done {

@ -17,11 +17,14 @@ package firestore
import (
"errors"
"fmt"
"io"
"reflect"
"sort"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
vkit "cloud.google.com/go/firestore/apiv1beta1"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
@ -54,9 +57,11 @@ func (d *DocumentRef) Collection(id string) *CollectionRef {
return newCollRefWithParent(d.Parent.c, d, id)
}
// Get retrieves the document. It returns a NotFound error if the document does not exist.
// You can test for NotFound with
// Get retrieves the document. If the document does not exist, Get return a NotFound error, which
// can be checked with
// grpc.Code(err) == codes.NotFound
// In that case, Get returns a non-nil DocumentSnapshot whose Exists method return false and whose
// ReadTime is the time of the failed read operation.
func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
if err := checkTransaction(ctx); err != nil {
return nil, err
@ -64,12 +69,15 @@ func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
if d == nil {
return nil, errNilDocRef
}
doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()),
&pb.GetDocumentRequest{Name: d.Path})
docsnaps, err := d.Parent.c.getAll(ctx, []*DocumentRef{d}, nil)
if err != nil {
return nil, err
}
return newDocumentSnapshot(d, doc, d.Parent.c)
ds := docsnaps[0]
if !ds.Exists() {
return ds, status.Errorf(codes.NotFound, "%q not found", d.Path)
}
return ds, nil
}
// Create creates the document with the given data.
@ -554,3 +562,50 @@ func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func(
}
return pi.Token, nil
}
// Snapshots returns an iterator over snapshots of the document. Each time the document
// changes or is added or deleted, a new snapshot will be generated.
func (d *DocumentRef) Snapshots(ctx context.Context) *DocumentSnapshotIterator {
return &DocumentSnapshotIterator{
docref: d,
ws: newWatchStreamForDocument(ctx, d),
}
}
// DocumentSnapshotIterator is an iterator over snapshots of a document.
// Call Next on the iterator to get a snapshot of the document each time it changes.
// Call Stop on the iterator when done.
//
// For an example, see DocumentRef.Snapshots.
type DocumentSnapshotIterator struct {
docref *DocumentRef
ws *watchStream
}
// Next blocks until the document changes, then returns the DocumentSnapshot for
// the current state of the document. If the document has been deleted, Next
// returns a DocumentSnapshot whose Exists method returns false.
//
// Next never returns iterator.Done unless it is called after Stop.
func (it *DocumentSnapshotIterator) Next() (*DocumentSnapshot, error) {
btree, _, readTime, err := it.ws.nextSnapshot()
if err != nil {
if err == io.EOF {
err = iterator.Done
}
// watchStream's error is sticky, so SnapshotIterator does not need to remember it.
return nil, err
}
if btree.Len() == 0 { // document deleted
return &DocumentSnapshot{Ref: it.docref, ReadTime: readTime}, nil
}
snap, _ := btree.At(0)
return snap.(*DocumentSnapshot), nil
}
// Stop stops receiving snapshots.
// You should always call Stop when you are done with an iterator, to free up resources.
// It is not safe to call Stop concurrently with Next.
func (it *DocumentSnapshotIterator) Stop() {
it.ws.stop()
}

@ -45,7 +45,15 @@ func TestDocGet(t *testing.T) {
UpdateTime: aTimestamp,
Fields: map[string]*pb.Value{"f": intval(1)},
}
srv.addRPC(&pb.GetDocumentRequest{Name: path}, pdoc)
srv.addRPC(&pb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: []string{path},
}, []interface{}{
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{pdoc},
ReadTime: aTimestamp2,
},
})
ref := c.Collection("C").Doc("a")
gotDoc, err := ref.Get(ctx)
if err != nil {
@ -55,6 +63,7 @@ func TestDocGet(t *testing.T) {
Ref: ref,
CreateTime: aTime,
UpdateTime: aTime,
ReadTime: aTime2,
proto: pdoc,
c: c,
}
@ -62,12 +71,17 @@ func TestDocGet(t *testing.T) {
t.Fatalf("\ngot %+v\nwant %+v", gotDoc, wantDoc)
}
path2 := "projects/projectID/databases/(default)/documents/C/b"
srv.addRPC(
&pb.GetDocumentRequest{
Name: "projects/projectID/databases/(default)/documents/C/b",
},
grpc.Errorf(codes.NotFound, "not found"),
)
&pb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: []string{path2},
}, []interface{}{
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Missing{path2},
ReadTime: aTimestamp3,
},
})
_, err = c.Collection("C").Doc("b").Get(ctx)
if grpc.Code(err) != codes.NotFound {
t.Errorf("got %v, want NotFound", err)

@ -21,8 +21,11 @@ import (
"time"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/golang/protobuf/ptypes"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
// A DocumentSnapshot contains document data and metadata.
@ -42,15 +45,29 @@ type DocumentSnapshot struct {
// documents and the read time of a query.
UpdateTime time.Time
// Read-only. The time at which the document was read.
ReadTime time.Time
c *Client
proto *pb.Document
}
// Exists reports whether the DocumentSnapshot represents an existing document.
// Even if Exists returns false, the Ref and ReadTime fields of the DocumentSnapshot
// are valid.
func (d *DocumentSnapshot) Exists() bool {
return d.proto != nil
}
// Data returns the DocumentSnapshot's fields as a map.
// It is equivalent to
// var m map[string]interface{}
// d.DataTo(&m)
// except that it returns nil if the document does not exist.
func (d *DocumentSnapshot) Data() map[string]interface{} {
if !d.Exists() {
return nil
}
m, err := createMapFromValueMap(d.proto.Fields, d.c)
// Any error here is a bug in the client.
if err != nil {
@ -87,7 +104,12 @@ func (d *DocumentSnapshot) Data() map[string]interface{} {
//
// Field names given by struct field tags are observed, as described in
// DocumentRef.Create.
//
// If the document does not exist, DataTo returns a NotFound error.
func (d *DocumentSnapshot) DataTo(p interface{}) error {
if !d.Exists() {
return status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
}
return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
}
@ -98,7 +120,12 @@ func (d *DocumentSnapshot) DataTo(p interface{}) error {
// such a path.
//
// See DocumentSnapshot.DataTo for how Firestore values are converted to Go values.
//
// If the document does not exist, DataAt returns a NotFound error.
func (d *DocumentSnapshot) DataAt(path string) (interface{}, error) {
if !d.Exists() {
return nil, status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
}
fp, err := parseDotSeparatedString(path)
if err != nil {
return nil, err
@ -107,7 +134,11 @@ func (d *DocumentSnapshot) DataAt(path string) (interface{}, error) {
}
// DataAtPath returns the data value denoted by the FieldPath fp.
// If the document does not exist, DataAtPath returns a NotFound error.
func (d *DocumentSnapshot) DataAtPath(fp FieldPath) (interface{}, error) {
if !d.Exists() {
return nil, status.Errorf(codes.NotFound, "document %s does not exist", d.Ref.Path)
}
v, err := valueAtPath(fp, d.proto.Fields)
if err != nil {
return nil, err
@ -241,21 +272,30 @@ func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]Field
return paths, nil
}
func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) {
func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client, readTime *tspb.Timestamp) (*DocumentSnapshot, error) {
d := &DocumentSnapshot{
Ref: ref,
c: c,
proto: proto,
}
ts, err := ptypes.Timestamp(proto.CreateTime)
if err != nil {
return nil, err
if proto != nil {
ts, err := ptypes.Timestamp(proto.CreateTime)
if err != nil {
return nil, err
}
d.CreateTime = ts
ts, err = ptypes.Timestamp(proto.UpdateTime)
if err != nil {
return nil, err
}
d.UpdateTime = ts
}
d.CreateTime = ts
ts, err = ptypes.Timestamp(proto.UpdateTime)
if err != nil {
return nil, err
if readTime != nil {
ts, err := ptypes.Timestamp(readTime)
if err != nil {
return nil, err
}
d.ReadTime = ts
}
d.UpdateTime = ts
return d, nil
}

@ -69,10 +69,11 @@ func TestNewDocumentSnapshot(t *testing.T) {
Ref: docRef,
CreateTime: time.Unix(10, 0).UTC(),
UpdateTime: time.Unix(20, 0).UTC(),
ReadTime: aTime,
proto: in,
c: c,
}
got, err := newDocumentSnapshot(docRef, in, c)
got, err := newDocumentSnapshot(docRef, in, c, aTimestamp)
if err != nil {
t.Fatal(err)
}

@ -316,6 +316,24 @@ func ExampleDocumentRef_Get() {
_ = docsnap // TODO: Use DocumentSnapshot.
}
func ExampleDocumentRef_Snapshots() {
ctx := context.Background()
client, err := firestore.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
defer client.Close()
iter := client.Doc("States/Idaho").Snapshots(ctx)
defer iter.Stop()
for {
docsnap, err := iter.Next()
if err != nil {
// TODO: Handle error.
}
_ = docsnap // TODO: Use DocumentSnapshot.
}
}
func ExampleDocumentSnapshot_Data() {
ctx := context.Background()
client, err := firestore.NewClient(ctx, "project-id")
@ -450,6 +468,7 @@ func ExampleDocumentIterator_Next() {
Where("pop", ">", 10).
OrderBy("pop", firestore.Desc)
iter := q.Documents(ctx)
defer iter.Stop()
for {
doc, err := iter.Next()
if err == iterator.Done {

@ -73,6 +73,14 @@ func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) erro
v.Set(reflect.ValueOf(t))
return nil
case typeOfProtoTimestamp:
x, ok := val.(*pb.Value_TimestampValue)
if !ok {
return typeErr()
}
v.Set(reflect.ValueOf(x.TimestampValue))
return nil
case typeOfLatLng:
x, ok := val.(*pb.Value_GeoPointValue)
if !ok {

@ -24,14 +24,16 @@ import (
"testing"
"time"
ts "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/genproto/googleapis/type/latlng"
)
var (
tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC)
ll = &latlng.LatLng{Latitude: 20, Longitude: 30}
tm = time.Date(2016, 12, 25, 0, 0, 0, 123456789, time.UTC)
ll = &latlng.LatLng{Latitude: 20, Longitude: 30}
ptm = &ts.Timestamp{12345, 67890}
)
func TestCreateFromProtoValue(t *testing.T) {
@ -187,6 +189,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) {
bs []byte
tmi time.Time
lli *latlng.LatLng
tmp *ts.Timestamp
)
bytes := []byte{1, 2, 3}
@ -197,6 +200,7 @@ func TestSetFromProtoValueNoJSON(t *testing.T) {
}{
{&bs, bytesval(bytes), bytes},
{&tmi, tsval(tm), tm},
{&tmp, &pb.Value{&pb.Value_TimestampValue{ptm}}, ptm},
{&lli, geoval(ll), ll},
} {
if err := setFromProtoValue(test.in, test.val, &Client{}); err != nil {

@ -19,13 +19,18 @@ import (
"flag"
"fmt"
"log"
"math"
"os"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"golang.org/x/net/context"
"google.golang.org/api/option"
@ -212,7 +217,8 @@ func TestIntegration_Create(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
start := time.Now()
wr := mustCreate("Create #1", t, doc, integrationTestMap)
h := testHelper{t}
wr := h.mustCreate(doc, integrationTestMap)
end := time.Now()
checkTimeBetween(t, wr.UpdateTime, start, end)
_, err := doc.Create(ctx, integrationTestMap)
@ -225,8 +231,9 @@ func TestIntegration_Create(t *testing.T) {
func TestIntegration_Get(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
mustCreate("Get #1", t, doc, integrationTestMap)
ds := mustGet("Get #1", t, doc)
h := testHelper{t}
h.mustCreate(doc, integrationTestMap)
ds := h.mustGet(doc)
if ds.CreateTime != ds.UpdateTime {
t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
}
@ -237,8 +244,8 @@ func TestIntegration_Get(t *testing.T) {
doc = integrationColl(t).NewDoc()
empty := map[string]interface{}{}
mustCreate("Get empty", t, doc, empty)
ds = mustGet("Get empty", t, doc)
h.mustCreate(doc, empty)
ds = h.mustGet(doc)
if ds.CreateTime != ds.UpdateTime {
t.Errorf("create time %s != update time %s", ds.CreateTime, ds.UpdateTime)
}
@ -246,20 +253,29 @@ func TestIntegration_Get(t *testing.T) {
t.Errorf("got\n%v\nwant\n%v", pretty.Value(got), pretty.Value(want))
}
_, err := integrationColl(t).NewDoc().Get(ctx)
ds, err := integrationColl(t).NewDoc().Get(ctx)
codeEq(t, "Get on a missing doc", codes.NotFound, err)
if ds == nil || ds.Exists() {
t.Fatal("got nil or existing doc snapshot, want !ds.Exists")
}
if ds.ReadTime.IsZero() {
t.Error("got zero read time")
}
}
func TestIntegration_GetAll(t *testing.T) {
type getAll struct{ N int }
h := testHelper{t}
coll := integrationColl(t)
ctx := context.Background()
var docRefs []*DocumentRef
for i := 0; i < 5; i++ {
doc := coll.NewDoc()
docRefs = append(docRefs, doc)
mustCreate("GetAll #1", t, doc, getAll{N: i})
if i != 3 {
h.mustCreate(doc, getAll{N: i})
}
}
docSnapshots, err := iClient.GetAll(ctx, docRefs)
if err != nil {
@ -269,13 +285,24 @@ func TestIntegration_GetAll(t *testing.T) {
t.Fatalf("got %d snapshots, want %d", got, want)
}
for i, ds := range docSnapshots {
var got getAll
if err := ds.DataTo(&got); err != nil {
t.Fatal(err)
if i == 3 {
if ds == nil || ds.Exists() {
t.Fatal("got nil or existing doc snapshot, want !ds.Exists")
}
err := ds.DataTo(nil)
codeEq(t, "DataTo on a missing doc", codes.NotFound, err)
} else {
var got getAll
if err := ds.DataTo(&got); err != nil {
t.Fatal(err)
}
want := getAll{N: i}
if got != want {
t.Errorf("%d: got %+v, want %+v", i, got, want)
}
}
want := getAll{N: i}
if got != want {
t.Errorf("%d: got %+v, want %+v", i, got, want)
if ds.ReadTime.IsZero() {
t.Errorf("%d: got zero read time", i)
}
}
}
@ -293,6 +320,7 @@ func TestIntegration_Add(t *testing.T) {
func TestIntegration_Set(t *testing.T) {
coll := integrationColl(t)
ctx := context.Background()
h := testHelper{t}
// Set Should be able to create a new doc.
doc := coll.NewDoc()
@ -313,7 +341,7 @@ func TestIntegration_Set(t *testing.T) {
if !wr1.UpdateTime.Before(wr2.UpdateTime) {
t.Errorf("update time did not increase: old=%s, new=%s", wr1.UpdateTime, wr2.UpdateTime)
}
ds := mustGet("Set #1", t, doc)
ds := h.mustGet(doc)
if got := ds.Data(); !testEqual(got, newData) {
t.Errorf("got %v, want %v", got, newData)
}
@ -330,7 +358,7 @@ func TestIntegration_Set(t *testing.T) {
if err != nil {
t.Fatal(err)
}
ds = mustGet("Set #2", t, doc)
ds = h.mustGet(doc)
want := map[string]interface{}{
"str": "change",
"x": "2",
@ -349,7 +377,7 @@ func TestIntegration_Set(t *testing.T) {
if err != nil {
t.Fatal(err)
}
ds = mustGet("Set #3", t, doc)
ds = h.mustGet(doc)
want = map[string]interface{}{
"str": "change",
"x": "4",
@ -366,11 +394,9 @@ func TestIntegration_Set(t *testing.T) {
func TestIntegration_Delete(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
mustCreate("Delete #1", t, doc, integrationTestMap)
wr, err := doc.Delete(ctx)
if err != nil {
t.Fatal(err)
}
h := testHelper{t}
h.mustCreate(doc, integrationTestMap)
wr := h.mustDelete(doc)
// Confirm that doc doesn't exist.
if _, err := doc.Get(ctx); grpc.Code(err) != codes.NotFound {
t.Fatalf("got error <%v>, want NotFound", err)
@ -382,7 +408,7 @@ func TestIntegration_Delete(t *testing.T) {
er(doc.Delete(ctx)))
// TODO(jba): confirm that the server should return InvalidArgument instead of
// FailedPrecondition.
wr = mustCreate("Delete #2", t, doc, integrationTestMap)
wr = h.mustCreate(doc, integrationTestMap)
codeEq(t, "Delete with wrong LastUpdateTime", codes.FailedPrecondition,
er(doc.Delete(ctx, LastUpdateTime(wr.UpdateTime.Add(-time.Millisecond)))))
codeEq(t, "Delete with right LastUpdateTime", codes.OK,
@ -392,7 +418,9 @@ func TestIntegration_Delete(t *testing.T) {
func TestIntegration_Update(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
mustCreate("Update", t, doc, integrationTestMap)
h := testHelper{t}
h.mustCreate(doc, integrationTestMap)
fpus := []Update{
{Path: "bool", Value: false},
{Path: "time", Value: 17},
@ -400,11 +428,8 @@ func TestIntegration_Update(t *testing.T) {
{Path: "null", Value: Delete},
{Path: "noSuchField", Value: Delete}, // deleting a non-existent field is a no-op
}
wr, err := doc.Update(ctx, fpus)
if err != nil {
t.Fatal(err)
}
ds := mustGet("Update", t, doc)
wr := h.mustUpdate(doc, fpus)
ds := h.mustGet(doc)
got := ds.Data()
want := copyMap(wantIntegrationTestMap)
want["bool"] = false
@ -428,6 +453,7 @@ func TestIntegration_Update(t *testing.T) {
func TestIntegration_Collections(t *testing.T) {
ctx := context.Background()
c := integrationClient(t)
h := testHelper{t}
got, err := c.Collections(ctx).GetAll()
if err != nil {
t.Fatal(err)
@ -450,7 +476,7 @@ func TestIntegration_Collections(t *testing.T) {
id := collectionIDs.New()
cr := doc.Collection(id)
want = append(want, cr)
mustCreate("Collections", t, cr.NewDoc(), integrationTestMap)
h.mustCreate(cr.NewDoc(), integrationTestMap)
}
got, err = doc.Collections(ctx).GetAll()
if err != nil {
@ -476,12 +502,13 @@ func TestIntegration_ServerTimestamp(t *testing.T) {
D: map[string]interface{}{"x": ServerTimestamp},
// E is unset, so will get the server timestamp.
}
h := testHelper{t}
doc := integrationColl(t).NewDoc()
// Bound times of the RPC, with some slack for clock skew.
start := time.Now()
mustCreate("ServerTimestamp", t, doc, data)
h.mustCreate(doc, data)
end := time.Now()
ds := mustGet("ServerTimestamp", t, doc)
ds := h.mustGet(doc)
var got S
if err := ds.DataTo(&got); err != nil {
t.Fatal(err)
@ -501,6 +528,7 @@ func TestIntegration_ServerTimestamp(t *testing.T) {
func TestIntegration_MergeServerTimestamp(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
h := testHelper{t}
// Create a doc with an ordinary field "a" and a ServerTimestamp field "b".
_, err := doc.Set(ctx, map[string]interface{}{
@ -509,7 +537,7 @@ func TestIntegration_MergeServerTimestamp(t *testing.T) {
if err != nil {
t.Fatal(err)
}
docSnap := mustGet("MergeST #1", t, doc)
docSnap := h.mustGet(doc)
data1 := docSnap.Data()
// Merge with a document with a different value of "a". However,
// specify only "b" in the list of merge fields.
@ -520,7 +548,7 @@ func TestIntegration_MergeServerTimestamp(t *testing.T) {
t.Fatal(err)
}
// The result should leave "a" unchanged, while "b" is updated.
docSnap = mustGet("MergeST #2", t, doc)
docSnap = h.mustGet(doc)
data2 := docSnap.Data()
if got, want := data2["a"], data1["a"]; got != want {
t.Errorf("got %v, want %v", got, want)
@ -535,6 +563,7 @@ func TestIntegration_MergeServerTimestamp(t *testing.T) {
func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
ctx := context.Background()
doc := integrationColl(t).NewDoc()
h := testHelper{t}
// Create a doc with an ordinary field "a" a ServerTimestamp field "b",
// and a second ServerTimestamp field "c.d".
@ -546,7 +575,7 @@ func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
if err != nil {
t.Fatal(err)
}
data1 := mustGet("MergeNST #1", t, doc).Data()
data1 := h.mustGet(doc).Data()
// Merge with a document with a different value of "a". However,
// specify only "c.d" in the list of merge fields.
_, err = doc.Set(ctx,
@ -560,7 +589,7 @@ func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
t.Fatal(err)
}
// The result should leave "a" and "b" unchanged, while "c.d" is updated.
data2 := mustGet("MergeNST #2", t, doc).Data()
data2 := h.mustGet(doc).Data()
if got, want := data2["a"], data1["a"]; got != want {
t.Errorf("a: got %v, want %v", got, want)
}
@ -579,6 +608,7 @@ func TestIntegration_MergeNestedServerTimestamp(t *testing.T) {
func TestIntegration_WriteBatch(t *testing.T) {
ctx := context.Background()
b := integrationClient(t).Batch()
h := testHelper{t}
doc1 := iColl.NewDoc()
doc2 := iColl.NewDoc()
b.Create(doc1, integrationTestMap)
@ -593,14 +623,14 @@ func TestIntegration_WriteBatch(t *testing.T) {
if got, want := len(wrs), 4; got != want {
t.Fatalf("got %d WriteResults, want %d", got, want)
}
got1 := mustGet("WriteBatch #1", t, doc1).Data()
got1 := h.mustGet(doc1).Data()
want := copyMap(wantIntegrationTestMap)
want["bool"] = false
delete(want, "str")
if !testEqual(got1, want) {
t.Errorf("got\n%#v\nwant\n%#v", got1, want)
}
got2 := mustGet("WriteBatch #2", t, doc2).Data()
got2 := h.mustGet(doc2).Data()
if !testEqual(got2, wantIntegrationTestMap) {
t.Errorf("got\n%#v\nwant\n%#v", got2, wantIntegrationTestMap)
}
@ -611,6 +641,7 @@ func TestIntegration_WriteBatch(t *testing.T) {
func TestIntegration_Query(t *testing.T) {
ctx := context.Background()
coll := integrationColl(t)
h := testHelper{t}
var docs []*DocumentRef
var wants []map[string]interface{}
for i := 0; i < 3; i++ {
@ -618,11 +649,7 @@ func TestIntegration_Query(t *testing.T) {
docs = append(docs, doc)
// To support running this test in parallel with the others, use a field name
// that we don't use anywhere else.
mustCreate(fmt.Sprintf("Query #%d", i), t, doc,
map[string]interface{}{
"q": i,
"x": 1,
})
h.mustCreate(doc, map[string]interface{}{"q": i, "x": 1})
wants = append(wants, map[string]interface{}{"q": int64(i)})
}
q := coll.Select("q").OrderBy("q", Asc)
@ -686,9 +713,43 @@ func TestIntegration_Query(t *testing.T) {
}
}
// Test unary filters.
// Firestore represents "== nil" and "== NaN" as unary IS_NULL / IS_NAN
// filters rather than binary field filters; this test checks that each form
// matches exactly the one document created with that value.
func TestIntegration_QueryUnary(t *testing.T) {
	ctx := context.Background()
	coll := integrationColl(t)
	h := testHelper{t}
	// One matching doc per unary filter, plus a doc with an ordinary value.
	h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": "a"})
	h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": nil})
	h.mustCreate(coll.NewDoc(), map[string]interface{}{"x": 2, "q": math.NaN()})
	wantNull := map[string]interface{}{"q": nil}
	wantNaN := map[string]interface{}{"q": math.NaN()}
	// The x == 2 clause restricts results to the three docs created above.
	base := coll.Select("q").Where("x", "==", 2)
	for _, test := range []struct {
		q    Query
		want map[string]interface{}
	}{
		{base.Where("q", "==", nil), wantNull},
		{base.Where("q", "==", math.NaN()), wantNaN},
	} {
		got, err := test.q.Documents(ctx).GetAll()
		if err != nil {
			t.Fatal(err)
		}
		if len(got) != 1 {
			t.Errorf("got %d responses, want 1", len(got))
			continue
		}
		if g, w := got[0].Data(), test.want; !testEqual(g, w) {
			t.Errorf("%v: got %v, want %v", test.q, g, w)
		}
	}
}
// Test the special DocumentID field in queries.
func TestIntegration_QueryName(t *testing.T) {
ctx := context.Background()
h := testHelper{t}
checkIDs := func(q Query, wantIDs []string) {
gots, err := q.Documents(ctx).GetAll()
@ -709,7 +770,7 @@ func TestIntegration_QueryName(t *testing.T) {
var wantIDs []string
for i := 0; i < 3; i++ {
doc := coll.NewDoc()
mustCreate(fmt.Sprintf("Query #%d", i), t, doc, map[string]interface{}{"nm": 1})
h.mustCreate(doc, map[string]interface{}{"nm": 1})
wantIDs = append(wantIDs, doc.ID)
}
sort.Strings(wantIDs)
@ -727,12 +788,13 @@ func TestIntegration_QueryName(t *testing.T) {
func TestIntegration_QueryNested(t *testing.T) {
ctx := context.Background()
h := testHelper{t}
coll1 := integrationColl(t)
doc1 := coll1.NewDoc()
coll2 := doc1.Collection(collectionIDs.New())
doc2 := coll2.NewDoc()
wantData := map[string]interface{}{"x": int64(1)}
mustCreate("QueryNested", t, doc2, wantData)
h.mustCreate(doc2, wantData)
q := coll2.Select("x")
got, err := q.Documents(ctx).GetAll()
if err != nil {
@ -748,11 +810,14 @@ func TestIntegration_QueryNested(t *testing.T) {
func TestIntegration_RunTransaction(t *testing.T) {
ctx := context.Background()
h := testHelper{t}
type Player struct {
Name string
Score int
Star bool `firestore:"*"`
}
pat := Player{Name: "Pat", Score: 3, Star: false}
client := integrationClient(t)
patDoc := iColl.Doc("pat")
@ -784,12 +849,12 @@ func TestIntegration_RunTransaction(t *testing.T) {
return anError
}
mustCreate("RunTransaction", t, patDoc, pat)
h.mustCreate(patDoc, pat)
err := client.RunTransaction(ctx, incPat)
if err != nil {
t.Fatal(err)
}
ds := mustGet("RunTransaction", t, patDoc)
ds := h.mustGet(patDoc)
var got Player
if err := ds.DataTo(&got); err != nil {
t.Fatal(err)
@ -816,6 +881,7 @@ func TestIntegration_RunTransaction(t *testing.T) {
func TestIntegration_TransactionGetAll(t *testing.T) {
ctx := context.Background()
h := testHelper{t}
type Player struct {
Name string
Score int
@ -825,8 +891,8 @@ func TestIntegration_TransactionGetAll(t *testing.T) {
client := integrationClient(t)
leeDoc := iColl.Doc("lee")
samDoc := iColl.Doc("sam")
mustCreate("TransactionGetAll", t, leeDoc, lee)
mustCreate("TransactionGetAll", t, samDoc, sam)
h.mustCreate(leeDoc, lee)
h.mustCreate(samDoc, sam)
err := client.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
docs, err := tx.GetAll([]*DocumentRef{samDoc, leeDoc})
@ -849,26 +915,167 @@ func TestIntegration_TransactionGetAll(t *testing.T) {
}
}
// TestIntegration_WatchDocument drives a single document through its
// lifecycle (missing, created, updated, deleted, re-created) and verifies
// that the snapshot stream reports each state in order.
func TestIntegration_WatchDocument(t *testing.T) {
	coll := integrationColl(t)
	ctx := context.Background()
	h := testHelper{t}
	doc := coll.NewDoc()
	it := doc.Snapshots(ctx)
	defer it.Stop()
	// next fetches the next snapshot, failing the test on stream error.
	next := func() *DocumentSnapshot {
		snap, err := it.Next()
		if err != nil {
			t.Fatal(err)
		}
		return snap
	}
	// The doc has not been created yet, so the first snapshot must not exist.
	snap := next()
	if snap.Exists() {
		t.Fatal("snapshot exists; it should not")
	}
	want := map[string]interface{}{"a": int64(1), "b": "two"}
	h.mustCreate(doc, want)
	snap = next()
	if got := snap.Data(); !testutil.Equal(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	// An update produces a snapshot reflecting the new field value.
	h.mustUpdate(doc, []Update{{Path: "a", Value: int64(2)}})
	want["a"] = int64(2)
	snap = next()
	if got := snap.Data(); !testutil.Equal(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	// Deletion produces a non-existent snapshot.
	h.mustDelete(doc)
	snap = next()
	if snap.Exists() {
		t.Fatal("snapshot exists; it should not")
	}
	// Re-creating the doc resumes existing snapshots.
	h.mustCreate(doc, want)
	snap = next()
	if got := snap.Data(); !testutil.Equal(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
type imap map[string]interface{}
// TestIntegration_WatchQuery exercises query snapshots: creating, updating
// and deleting documents that enter and leave the query's result set, and
// checking both the full result list and the per-snapshot change list.
func TestIntegration_WatchQuery(t *testing.T) {
	ctx := context.Background()
	coll := integrationColl(t)
	h := testHelper{t}
	q := coll.Where("e", ">", 1).OrderBy("e", Asc)
	it := q.Snapshots(ctx)
	defer it.Stop()
	// next returns the docs and changes of the next query snapshot, also
	// validating the iterator's ReadTime and Size bookkeeping.
	next := func() ([]*DocumentSnapshot, []DocumentChange) {
		diter, err := it.Next()
		if err != nil {
			t.Fatal(err)
		}
		if it.ReadTime.IsZero() {
			t.Fatal("zero time")
		}
		ds, err := diter.GetAll()
		if err != nil {
			t.Fatal(err)
		}
		if it.Size != len(ds) {
			t.Fatalf("Size=%d but we have %d docs", it.Size, len(ds))
		}
		return ds, it.Changes
	}
	// Ignore ReadTime when diffing snapshots: it varies per read.
	copts := append([]cmp.Option{cmpopts.IgnoreFields(DocumentSnapshot{}, "ReadTime")}, cmpOpts...)
	// check asserts that the next snapshot has exactly the wanted docs and changes.
	check := func(msg string, wantd []*DocumentSnapshot, wantc []DocumentChange) {
		gotd, gotc := next()
		if diff := testutil.Diff(gotd, wantd, copts...); diff != "" {
			t.Errorf("%s: %s", msg, diff)
		}
		if diff := testutil.Diff(gotc, wantc, copts...); diff != "" {
			t.Errorf("%s: %s", msg, diff)
		}
	}
	check("initial", nil, nil)
	doc1 := coll.NewDoc()
	h.mustCreate(doc1, imap{"e": int64(2), "b": "two"})
	wds := h.mustGet(doc1)
	check("one",
		[]*DocumentSnapshot{wds},
		[]DocumentChange{{Kind: DocumentAdded, Doc: wds, OldIndex: -1, NewIndex: 0}})
	// Add a doc that does not match. We won't see a snapshot for this.
	doc2 := coll.NewDoc()
	h.mustCreate(doc2, imap{"e": int64(1)})
	// Update the first doc. We should see the change. We won't see doc2.
	h.mustUpdate(doc1, []Update{{Path: "e", Value: int64(3)}})
	wds = h.mustGet(doc1)
	check("update",
		[]*DocumentSnapshot{wds},
		[]DocumentChange{{Kind: DocumentModified, Doc: wds, OldIndex: 0, NewIndex: 0}})
	// Now update doc so that it is not in the query. We should see a snapshot with no docs.
	h.mustUpdate(doc1, []Update{{Path: "e", Value: int64(0)}})
	check("update2", nil, []DocumentChange{{Kind: DocumentRemoved, Doc: wds, OldIndex: 0, NewIndex: -1}})
	// Add two docs out of order. We should see them in order.
	doc3 := coll.NewDoc()
	doc4 := coll.NewDoc()
	want3 := imap{"e": int64(5)}
	want4 := imap{"e": int64(4)}
	h.mustCreate(doc3, want3)
	h.mustCreate(doc4, want4)
	wds4 := h.mustGet(doc4)
	wds3 := h.mustGet(doc3)
	check("two#1",
		[]*DocumentSnapshot{wds3},
		[]DocumentChange{{Kind: DocumentAdded, Doc: wds3, OldIndex: -1, NewIndex: 0}})
	check("two#2",
		[]*DocumentSnapshot{wds4, wds3},
		[]DocumentChange{{Kind: DocumentAdded, Doc: wds4, OldIndex: -1, NewIndex: 0}})
	// Delete a doc.
	h.mustDelete(doc4)
	check("after del", []*DocumentSnapshot{wds3}, []DocumentChange{{Kind: DocumentRemoved, Doc: wds4, OldIndex: 0, NewIndex: -1}})
}
// TestIntegration_WatchQueryCancel verifies that cancelling the context of
// an open snapshot stream makes the next call to Next return codes.Canceled.
func TestIntegration_WatchQueryCancel(t *testing.T) {
	ctx := context.Background()
	coll := integrationColl(t)
	q := coll.Where("e", ">", 1).OrderBy("e", Asc)
	ctx, cancel := context.WithCancel(ctx)
	it := q.Snapshots(ctx)
	defer it.Stop()
	// First call opens the stream.
	_, err := it.Next()
	if err != nil {
		t.Fatal(err)
	}
	cancel()
	_, err = it.Next()
	codeEq(t, "after cancel", codes.Canceled, err)
}
// codeEq fails the test unless err carries the given gRPC status code.
func codeEq(t *testing.T, msg string, code codes.Code, err error) {
	got := grpc.Code(err)
	if got == code {
		return
	}
	t.Fatalf("%s:\ngot <%v>\nwant code %s", msg, err, code)
}
func mustCreate(msg string, t *testing.T, doc *DocumentRef, data interface{}) *WriteResult {
wr, err := doc.Create(context.Background(), data)
if err != nil {
t.Fatalf("%s: creating: %v", msg, err)
func loc() string {
_, file, line, ok := runtime.Caller(2)
if !ok {
return "???"
}
return wr
}
func mustGet(msg string, t *testing.T, doc *DocumentRef) *DocumentSnapshot {
d, err := doc.Get(context.Background())
if err != nil {
t.Fatalf("%s: getting: %v", msg, err)
}
return d
return fmt.Sprintf("%s:%d", filepath.Base(file), line)
}
func copyMap(m map[string]interface{}) map[string]interface{} {
@ -888,3 +1095,39 @@ func checkTimeBetween(t *testing.T, got, low, high time.Time) {
t.Fatalf("got %s, not in [%s, %s]", got, low, high)
}
}
// testHelper wraps a *testing.T with must-style Firestore operations that
// fail the test, reporting the caller's file:line via loc(), on any error.
type testHelper struct {
	t *testing.T
}
// mustCreate creates doc with data, failing the test on error.
func (h testHelper) mustCreate(doc *DocumentRef, data interface{}) *WriteResult {
	res, err := doc.Create(context.Background(), data)
	if err == nil {
		return res
	}
	h.t.Fatalf("%s: creating: %v", loc(), err)
	return nil // unreachable: Fatalf does not return
}
// mustUpdate applies updates to doc, failing the test on error.
func (h testHelper) mustUpdate(doc *DocumentRef, updates []Update) *WriteResult {
	res, err := doc.Update(context.Background(), updates)
	if err == nil {
		return res
	}
	h.t.Fatalf("%s: updating: %v", loc(), err)
	return nil // unreachable: Fatalf does not return
}
// mustGet fetches doc's current snapshot, failing the test on error.
func (h testHelper) mustGet(doc *DocumentRef) *DocumentSnapshot {
	snap, err := doc.Get(context.Background())
	if err == nil {
		return snap
	}
	h.t.Fatalf("%s: getting: %v", loc(), err)
	return nil // unreachable: Fatalf does not return
}
// mustDelete deletes doc, failing the test on error.
func (h testHelper) mustDelete(doc *DocumentRef) *WriteResult {
	wr, err := doc.Delete(context.Background())
	if err != nil {
		// Bug fix: the failure message previously said "updating", a
		// copy-paste from mustUpdate; report the actual operation.
		h.t.Fatalf("%s: deleting: %v", loc(), err)
	}
	return wr
}

@ -21,6 +21,9 @@ database.
See https://cloud.google.com/firestore/docs for an introduction
to Cloud Firestore and additional help on using the Firestore API.
Note: you can't use both Cloud Firestore and Cloud Datastore in the same
project.
Creating a Client
To start working with this package, create a client with a project ID:

216
vendor/cloud.google.com/go/firestore/order.go generated vendored Normal file

@ -0,0 +1,216 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firestore
import (
"bytes"
"fmt"
"math"
"sort"
"strings"
tspb "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
)
// Returns a negative number, zero, or a positive number depending on whether a is
// less than, equal to, or greater than b according to Firestore's ordering of
// values.
func compareValues(a, b *pb.Value) int {
	// Values of different types order by their type rank alone.
	ta := typeOrder(a)
	tb := typeOrder(b)
	if ta != tb {
		return compareInt64s(int64(ta), int64(tb))
	}
	// Same type rank: compare the payloads. Note integers and doubles share
	// a rank, so the numeric cases convert both sides via toFloat.
	switch a := a.ValueType.(type) {
	case *pb.Value_NullValue:
		return 0 // nulls are equal
	case *pb.Value_BooleanValue:
		// false < true.
		av := a.BooleanValue
		bv := b.GetBooleanValue()
		switch {
		case av && !bv:
			return 1
		case bv && !av:
			return -1
		default:
			return 0
		}
	case *pb.Value_IntegerValue:
		return compareNumbers(float64(a.IntegerValue), toFloat(b))
	case *pb.Value_DoubleValue:
		return compareNumbers(a.DoubleValue, toFloat(b))
	case *pb.Value_TimestampValue:
		return compareTimestamps(a.TimestampValue, b.GetTimestampValue())
	case *pb.Value_StringValue:
		return strings.Compare(a.StringValue, b.GetStringValue())
	case *pb.Value_BytesValue:
		return bytes.Compare(a.BytesValue, b.GetBytesValue())
	case *pb.Value_ReferenceValue:
		return compareReferences(a.ReferenceValue, b.GetReferenceValue())
	case *pb.Value_GeoPointValue:
		// Geo points order by latitude, then longitude.
		ag := a.GeoPointValue
		bg := b.GetGeoPointValue()
		if ag.Latitude != bg.Latitude {
			return compareFloat64s(ag.Latitude, bg.Latitude)
		}
		return compareFloat64s(ag.Longitude, bg.Longitude)
	case *pb.Value_ArrayValue:
		return compareArrays(a.ArrayValue.Values, b.GetArrayValue().Values)
	case *pb.Value_MapValue:
		return compareMaps(a.MapValue.Fields, b.GetMapValue().Fields)
	default:
		// Unreachable for well-formed pb.Values; indicates a new/unknown oneof case.
		panic(fmt.Sprintf("bad value type: %v", a))
	}
}
// Treats NaN as less than any non-NaN.
func compareNumbers(a, b float64) int {
switch {
case math.IsNaN(a):
if math.IsNaN(b) {
return 0
}
return -1
case math.IsNaN(b):
return 1
default:
return compareFloat64s(a, b)
}
}
// toFloat returns v as a float64. v must hold either an Integer or a Double.
func toFloat(v *pb.Value) float64 {
	switch x := v.ValueType.(type) {
	case *pb.Value_IntegerValue:
		return float64(x.IntegerValue)
	default:
		return v.GetDoubleValue()
	}
}
// compareTimestamps orders two protobuf timestamps chronologically:
// seconds first, then nanoseconds as a tie-breaker.
func compareTimestamps(a, b *tspb.Timestamp) int {
	if a.Seconds != b.Seconds {
		return compareInt64s(a.Seconds, b.Seconds)
	}
	return compareInt64s(int64(a.Nanos), int64(b.Nanos))
}
// compareReferences orders two document resource names by comparing their
// slash-separated path components lexicographically, one component at a time.
// (Renamed locals so the proto package alias pb is not shadowed.)
func compareReferences(a, b string) int {
	as := strings.Split(a, "/")
	bs := strings.Split(b, "/")
	return compareSequences(len(as), len(bs), func(i int) int {
		return strings.Compare(as[i], bs[i])
	})
}
// compareArrays orders two Firestore arrays element by element; on a tie,
// the shorter array sorts first.
func compareArrays(a, b []*pb.Value) int {
	cmpAt := func(i int) int { return compareValues(a[i], b[i]) }
	return compareSequences(len(a), len(b), cmpAt)
}
// compareMaps orders two Firestore maps by walking their entries in
// ascending key order: keys are compared first, then the values under
// equal keys; on a full tie, the map with fewer entries sorts first.
func compareMaps(a, b map[string]*pb.Value) int {
	var aks, bks []string
	for k := range a {
		aks = append(aks, k)
	}
	for k := range b {
		bks = append(bks, k)
	}
	sort.Strings(aks)
	sort.Strings(bks)
	return compareSequences(len(aks), len(bks), func(i int) int {
		if c := strings.Compare(aks[i], bks[i]); c != 0 {
			return c
		}
		// Keys are equal here, so aks[i] indexes both maps.
		return compareValues(a[aks[i]], b[bks[i]])
	})
}
// compareSequences compares two sequences of lengths len1 and len2 using
// compare(i) on each shared index. If all shared elements are equal, the
// shorter sequence sorts first.
func compareSequences(len1, len2 int, compare func(int) int) int {
	n := len1
	if len2 < n {
		n = len2
	}
	for i := 0; i < n; i++ {
		if c := compare(i); c != 0 {
			return c
		}
	}
	switch {
	case len1 < len2:
		return -1
	case len1 > len2:
		return 1
	default:
		return 0
	}
}
// compareFloat64s returns -1, 0, or 1 as a is less than, equal to, or
// greater than b. NaN handling is the caller's responsibility (see
// compareNumbers).
func compareFloat64s(a, b float64) int {
	if a < b {
		return -1
	}
	if a > b {
		return 1
	}
	return 0
}
// compareInt64s returns -1, 0, or 1 as a is less than, equal to, or
// greater than b.
func compareInt64s(a, b int64) int {
	if a == b {
		return 0
	}
	if a < b {
		return -1
	}
	return 1
}
// typeOrder maps the type held by v to an integer encoding Firestore's
// cross-type sort order; comparing the integers compares the types.
// Integers and doubles deliberately share an order, since Firestore sorts
// them together.
func typeOrder(v *pb.Value) int {
	switch v.ValueType.(type) {
	case *pb.Value_NullValue:
		return 0
	case *pb.Value_BooleanValue:
		return 1
	case *pb.Value_IntegerValue, *pb.Value_DoubleValue:
		return 2
	case *pb.Value_TimestampValue:
		return 3
	case *pb.Value_StringValue:
		return 4
	case *pb.Value_BytesValue:
		return 5
	case *pb.Value_ReferenceValue:
		return 6
	case *pb.Value_GeoPointValue:
		return 7
	case *pb.Value_ArrayValue:
		return 8
	case *pb.Value_MapValue:
		return 9
	}
	panic(fmt.Sprintf("bad value type: %v", v))
}

118
vendor/cloud.google.com/go/firestore/order_test.go generated vendored Normal file

@ -0,0 +1,118 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firestore
import (
"math"
"testing"
"time"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/genproto/googleapis/type/latlng"
)
// TestCompareValues checks compareValues against a hand-ordered list of
// values spanning every Firestore type: each value must equal itself, sort
// strictly before every later value, and strictly after every earlier one.
func TestCompareValues(t *testing.T) {
	// Ordered list of values.
	vals := []*pb.Value{
		nullValue,
		boolval(false),
		boolval(true),
		// Numbers: NaN sorts first, then by magnitude; ints and doubles interleave.
		floatval(math.NaN()),
		floatval(math.Inf(-1)),
		floatval(-math.MaxFloat64),
		int64val(math.MinInt64),
		floatval(-1.1),
		intval(-1),
		intval(0),
		floatval(math.SmallestNonzeroFloat64),
		intval(1),
		floatval(1.1),
		intval(2),
		int64val(math.MaxInt64),
		floatval(math.MaxFloat64),
		floatval(math.Inf(1)),
		tsval(time.Date(2016, 5, 20, 10, 20, 0, 0, time.UTC)),
		tsval(time.Date(2016, 10, 21, 15, 32, 0, 0, time.UTC)),
		// Strings compare by UTF-8 byte order.
		strval(""),
		strval("\u0000\ud7ff\ue000\uffff"),
		strval("(╯°□°)╯︵ ┻━┻"),
		strval("a"),
		strval("abc def"),
		strval("e\u0301b"),
		strval("æ"),
		strval("\u00e9a"),
		bytesval([]byte{}),
		bytesval([]byte{0}),
		bytesval([]byte{0, 1, 2, 3, 4}),
		bytesval([]byte{0, 1, 2, 4, 3}),
		bytesval([]byte{255}),
		// References compare component-wise, so "c10" sorts after "c1/...".
		refval("projects/p1/databases/d1/documents/c1/doc1"),
		refval("projects/p1/databases/d1/documents/c1/doc2"),
		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc1"),
		refval("projects/p1/databases/d1/documents/c1/doc2/c2/doc2"),
		refval("projects/p1/databases/d1/documents/c10/doc1"),
		refval("projects/p1/databases/dkkkkklkjnjkkk1/documents/c2/doc1"),
		refval("projects/p2/databases/d2/documents/c1/doc1"),
		refval("projects/p2/databases/d2/documents/c1-/doc1"),
		// Geo points: latitude first, then longitude.
		geopoint(-90, -180),
		geopoint(-90, 0),
		geopoint(-90, 180),
		geopoint(0, -180),
		geopoint(0, 0),
		geopoint(0, 180),
		geopoint(1, -180),
		geopoint(1, 0),
		geopoint(1, 180),
		geopoint(90, -180),
		geopoint(90, 0),
		geopoint(90, 180),
		arrayval(),
		arrayval(strval("bar")),
		arrayval(strval("foo")),
		arrayval(strval("foo"), intval(1)),
		arrayval(strval("foo"), intval(2)),
		arrayval(strval("foo"), strval("0")),
		mapval(map[string]*pb.Value{"bar": intval(0)}),
		mapval(map[string]*pb.Value{"bar": intval(0), "foo": intval(1)}),
		mapval(map[string]*pb.Value{"foo": intval(1)}),
		mapval(map[string]*pb.Value{"foo": intval(2)}),
		mapval(map[string]*pb.Value{"foo": strval("0")}),
	}
	// Check every ordered pair in both directions, plus self-equality.
	for i, v1 := range vals {
		if got := compareValues(v1, v1); got != 0 {
			t.Errorf("compare(%v, %v) == %d, want 0", v1, v1, got)
		}
		for _, v2 := range vals[i+1:] {
			if got := compareValues(v1, v2); got != -1 {
				t.Errorf("compare(%v, %v) == %d, want -1", v1, v2, got)
			}
			if got := compareValues(v2, v1); got != 1 {
				t.Errorf("compare(%v, %v) == %d, want 1", v1, v2, got)
			}
		}
	}
	// Integers and Doubles order the same.
	n1 := intval(17)
	n2 := floatval(17)
	if got := compareValues(n1, n2); got != 0 {
		t.Errorf("compare(%v, %v) == %d, want 0", n1, n2, got)
	}
}
// geopoint builds a GeoPoint pb.Value from a latitude/longitude pair.
func geopoint(lat, lng float64) *pb.Value {
	pt := &latlng.LatLng{Latitude: lat, Longitude: lng}
	return geoval(pt)
}

@ -20,11 +20,13 @@ import (
"io"
"math"
"reflect"
"time"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"cloud.google.com/go/internal/btree"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/api/iterator"
)
@ -397,6 +399,41 @@ func (q *Query) docSnapshotToCursorValues(ds *DocumentSnapshot, orders []order)
return vals, nil
}
// Returns a function that compares DocumentSnapshots according to q's ordering.
// Comparison walks q's orderings in sequence, falling through to the next on
// ties; a trailing ordering on DocumentID guarantees a total order. The
// returned function reports an error if an ordered field is missing from a
// snapshot.
func (q Query) compareFunc() func(d1, d2 *DocumentSnapshot) (int, error) {
	// Add implicit sorting by name, using the last specified direction.
	lastDir := Asc
	if len(q.orders) > 0 {
		lastDir = q.orders[len(q.orders)-1].dir
	}
	orders := append(q.copyOrders(), order{[]string{DocumentID}, lastDir})
	return func(d1, d2 *DocumentSnapshot) (int, error) {
		for _, ord := range orders {
			var cmp int
			if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID {
				// The special DocumentID field compares by resource path.
				cmp = compareReferences(d1.Ref.Path, d2.Ref.Path)
			} else {
				v1, err := valueAtPath(ord.fieldPath, d1.proto.Fields)
				if err != nil {
					return 0, err
				}
				v2, err := valueAtPath(ord.fieldPath, d2.proto.Fields)
				if err != nil {
					return 0, err
				}
				cmp = compareValues(v1, v2)
			}
			if cmp != 0 {
				// Descending orderings invert the comparison result.
				if ord.dir == Desc {
					cmp = -cmp
				}
				return cmp, nil
			}
		}
		return 0, nil
	}
}
type filter struct {
fieldPath FieldPath
op string
@ -407,6 +444,21 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
if err := f.fieldPath.validate(); err != nil {
return nil, err
}
if uop, ok := unaryOpFor(f.value); ok {
if f.op != "==" {
return nil, fmt.Errorf("firestore: must use '==' when comparing %v", f.value)
}
return &pb.StructuredQuery_Filter{
FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
UnaryFilter: &pb.StructuredQuery_UnaryFilter{
OperandType: &pb.StructuredQuery_UnaryFilter_Field{
Field: fref(f.fieldPath),
},
Op: uop,
},
},
}, nil
}
var op pb.StructuredQuery_FieldFilter_Operator
switch f.op {
case "<":
@ -431,7 +483,7 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
}
return &pb.StructuredQuery_Filter{
FilterType: &pb.StructuredQuery_Filter_FieldFilter{
&pb.StructuredQuery_FieldFilter{
FieldFilter: &pb.StructuredQuery_FieldFilter{
Field: fref(f.fieldPath),
Op: op,
Value: val,
@ -440,6 +492,28 @@ func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
}, nil
}
// unaryOpFor reports whether value must be expressed as a unary filter
// (IS_NULL for nil, IS_NAN for NaN floats) and, if so, which operator.
func unaryOpFor(value interface{}) (pb.StructuredQuery_UnaryFilter_Operator, bool) {
	if value == nil {
		return pb.StructuredQuery_UnaryFilter_IS_NULL, true
	}
	if isNaN(value) {
		return pb.StructuredQuery_UnaryFilter_IS_NAN, true
	}
	return pb.StructuredQuery_UnaryFilter_OPERATOR_UNSPECIFIED, false
}
func isNaN(x interface{}) bool {
switch x := x.(type) {
case float32:
return math.IsNaN(float64(x))
case float64:
return math.IsNaN(x)
default:
return false
}
}
type order struct {
fieldPath FieldPath
dir Direction
@ -473,19 +547,25 @@ func trunc32(i int) int32 {
// Documents returns an iterator over the query's resulting documents.
func (q Query) Documents(ctx context.Context) *DocumentIterator {
return &DocumentIterator{
ctx: withResourceHeader(ctx, q.c.path()),
q: &q,
err: checkTransaction(ctx),
iter: newQueryDocumentIterator(withResourceHeader(ctx, q.c.path()), &q, nil),
err: checkTransaction(ctx),
}
}
// DocumentIterator is an iterator over documents returned by a query.
type DocumentIterator struct {
ctx context.Context
q *Query
tid []byte // transaction ID, if any
streamClient pb.Firestore_RunQueryClient
err error
iter docIterator
err error
}
// Unexported interface so we can have two different kinds of DocumentIterator: one
// for straight queries, and one for query snapshots. We do it this way instead of
// making DocumentIterator an interface because in the client libraries, iterators are
// always concrete types, and the fact that this one has two different implementations
// is an internal detail.
type docIterator interface {
next() (*DocumentSnapshot, error)
stop()
}
// Next returns the next result. Its second return value is iterator.Done if there
@ -495,56 +575,29 @@ func (it *DocumentIterator) Next() (*DocumentSnapshot, error) {
if it.err != nil {
return nil, it.err
}
client := it.q.c
if it.streamClient == nil {
sq, err := it.q.toProto()
if err != nil {
it.err = err
return nil, err
}
req := &pb.RunQueryRequest{
Parent: it.q.parentPath,
QueryType: &pb.RunQueryRequest_StructuredQuery{sq},
}
if it.tid != nil {
req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid}
}
it.streamClient, it.err = client.c.RunQuery(it.ctx, req)
if it.err != nil {
return nil, it.err
}
}
var res *pb.RunQueryResponse
var err error
for {
res, err = it.streamClient.Recv()
if err == io.EOF {
err = iterator.Done
}
if err != nil {
it.err = err
return nil, it.err
}
if res.Document != nil {
break
}
// No document => partial progress; keep receiving.
}
docRef, err := pathToDoc(res.Document.Name, client)
ds, err := it.iter.next()
if err != nil {
it.err = err
return nil, err
}
doc, err := newDocumentSnapshot(docRef, res.Document, client)
if err != nil {
it.err = err
return nil, err
return ds, err
}
// Stop stops the iterator, freeing its resources.
// Always call Stop when you are done with an iterator.
// It is not safe to call Stop concurrently with Next.
func (it *DocumentIterator) Stop() {
if it.iter != nil { // possible in error cases
it.iter.stop()
}
if it.err == nil {
it.err = iterator.Done
}
return doc, nil
}
// GetAll returns all the documents remaining from the iterator.
// It is not necessary to call Stop on the iterator after calling GetAll.
func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) {
defer it.Stop()
var docs []*DocumentSnapshot
for {
doc, err := it.Next()
@ -559,7 +612,146 @@ func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) {
return docs, nil
}
// TODO(jba): Does the iterator need a Stop or Close method? I don't think so--
// I don't think the client can terminate a streaming receive except perhaps
// by cancelling the context, and the user can do that themselves if they wish.
// Find out for sure.
type queryDocumentIterator struct {
ctx context.Context
cancel func()
q *Query
tid []byte // transaction ID, if any
streamClient pb.Firestore_RunQueryClient
}
// newQueryDocumentIterator returns an iterator over the documents matching q.
// tid, if non-nil, is the ID of the transaction under which to run the query.
// The iterator owns a cancelable child context so stop can tear down the
// underlying stream.
func newQueryDocumentIterator(ctx context.Context, q *Query, tid []byte) *queryDocumentIterator {
	cctx, cancel := context.WithCancel(ctx)
	it := &queryDocumentIterator{
		ctx:    cctx,
		cancel: cancel,
		q:      q,
		tid:    tid,
	}
	return it
}
// next returns the next document from the RunQuery stream, opening the
// stream lazily on the first call. It returns iterator.Done when the
// server ends the stream. Responses that carry no document are progress
// reports and are skipped.
func (it *queryDocumentIterator) next() (*DocumentSnapshot, error) {
	client := it.q.c
	if it.streamClient == nil {
		// First call: build the RunQuery request and open the stream.
		sq, err := it.q.toProto()
		if err != nil {
			return nil, err
		}
		req := &pb.RunQueryRequest{
			Parent:    it.q.parentPath,
			QueryType: &pb.RunQueryRequest_StructuredQuery{sq},
		}
		if it.tid != nil {
			// Run the query under the transaction it was created in.
			req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid}
		}
		it.streamClient, err = client.c.RunQuery(it.ctx, req)
		if err != nil {
			return nil, err
		}
	}
	var res *pb.RunQueryResponse
	var err error
	for {
		res, err = it.streamClient.Recv()
		if err == io.EOF {
			// End of stream: report exhaustion in the standard iterator way.
			return nil, iterator.Done
		}
		if err != nil {
			return nil, err
		}
		if res.Document != nil {
			break
		}
		// No document => partial progress; keep receiving.
	}
	// Convert the document's resource name into a DocumentRef, then build
	// the snapshot, preserving the response's read time.
	docRef, err := pathToDoc(res.Document.Name, client)
	if err != nil {
		return nil, err
	}
	doc, err := newDocumentSnapshot(docRef, res.Document, client, res.ReadTime)
	if err != nil {
		return nil, err
	}
	return doc, nil
}
// stop cancels the iterator's context, which terminates the underlying
// RunQuery stream if one is open.
func (it *queryDocumentIterator) stop() {
	it.cancel()
}
// Snapshots returns an iterator over snapshots of the query. Each time the query
// results change, a new snapshot will be generated.
func (q Query) Snapshots(ctx context.Context) *QuerySnapshotIterator {
	ws, err := newWatchStreamForQuery(ctx, q)
	if err != nil {
		// Defer reporting the error to the iterator's Next call;
		// note that ws (and hence the iterator's ws field) is nil here.
		return &QuerySnapshotIterator{err: err}
	}
	return &QuerySnapshotIterator{
		Query: q,
		ws:    ws,
	}
}
// QuerySnapshotIterator is an iterator over snapshots of a query.
// Call Next on the iterator to get a snapshot of the query's results each time they change.
// Call Stop on the iterator when done.
//
// For an example, see Query.Snapshots.
type QuerySnapshotIterator struct {
	// The Query used to construct this iterator.
	Query Query
	// The time at which the most recent snapshot was obtained from Firestore.
	ReadTime time.Time
	// The number of results in the most recent snapshot.
	Size int
	// The changes since the previous snapshot.
	Changes []DocumentChange
	// ws delivers consecutive snapshots of the query results.
	ws *watchStream
	// err is sticky: once set, every subsequent Next call returns it.
	err error
}
// Next blocks until the query's results change, then returns a DocumentIterator for
// the current results.
//
// Next never returns iterator.Done unless it is called after Stop.
func (it *QuerySnapshotIterator) Next() (*DocumentIterator, error) {
	if it.err != nil {
		return nil, it.err
	}
	tree, changes, readTime, err := it.ws.nextSnapshot()
	if err == io.EOF {
		// Translate end-of-stream into the standard iterator sentinel.
		err = iterator.Done
	}
	if err != nil {
		// Remember the error so all future calls fail fast.
		it.err = err
		return nil, err
	}
	// Record the snapshot's metadata on the iterator, then return an
	// iterator positioned at the start of the new result tree.
	it.Changes = changes
	it.ReadTime = readTime
	it.Size = tree.Len()
	docs := &DocumentIterator{
		iter: (*btreeDocumentIterator)(tree.BeforeIndex(0)),
	}
	return docs, nil
}
// Stop stops receiving snapshots.
// You should always call Stop when you are done with an iterator, to free up resources.
// It is not safe to call Stop concurrently with Next.
func (it *QuerySnapshotIterator) Stop() {
	// ws is nil when Query.Snapshots failed to create the watch stream;
	// guard against a nil dereference, mirroring DocumentIterator.Stop.
	if it.ws != nil {
		it.ws.stop()
	}
}
// btreeDocumentIterator adapts a btree.Iterator whose keys are
// *DocumentSnapshot values to the docIterator interface used by
// DocumentIterator.
type btreeDocumentIterator btree.Iterator

func (it *btreeDocumentIterator) next() (*DocumentSnapshot, error) {
	if !(*btree.Iterator)(it).Next() {
		return nil, iterator.Done
	}
	return it.Key.(*DocumentSnapshot), nil
}

// stop is a no-op: an in-memory btree iterator holds no resources.
func (*btreeDocumentIterator) stop() {}

@ -15,6 +15,8 @@
package firestore
import (
"math"
"sort"
"testing"
"golang.org/x/net/context"
@ -22,9 +24,58 @@ import (
"cloud.google.com/go/internal/pretty"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
tspb "github.com/golang/protobuf/ptypes/timestamp"
"github.com/golang/protobuf/ptypes/wrappers"
)
// TestFilterToProto checks that filters convert to the correct protos:
// ordinary comparisons become field filters, while equality comparisons
// with nil or NaN become unary filters.
func TestFilterToProto(t *testing.T) {
	for _, test := range []struct {
		in   filter
		want *pb.StructuredQuery_Filter
	}{
		{
			// A binary comparison produces a field filter.
			filter{[]string{"a"}, ">", 1},
			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_FieldFilter{
				FieldFilter: &pb.StructuredQuery_FieldFilter{
					Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
					Op:    pb.StructuredQuery_FieldFilter_GREATER_THAN,
					Value: intval(1),
				},
			}},
		},
		{
			// Equality with nil produces an IS_NULL unary filter.
			filter{[]string{"a"}, "==", nil},
			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
				UnaryFilter: &pb.StructuredQuery_UnaryFilter{
					OperandType: &pb.StructuredQuery_UnaryFilter_Field{
						Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
					},
					Op: pb.StructuredQuery_UnaryFilter_IS_NULL,
				},
			}},
		},
		{
			// Equality with NaN produces an IS_NAN unary filter.
			filter{[]string{"a"}, "==", math.NaN()},
			&pb.StructuredQuery_Filter{FilterType: &pb.StructuredQuery_Filter_UnaryFilter{
				UnaryFilter: &pb.StructuredQuery_UnaryFilter{
					OperandType: &pb.StructuredQuery_UnaryFilter_Field{
						Field: &pb.StructuredQuery_FieldReference{FieldPath: "a"},
					},
					Op: pb.StructuredQuery_UnaryFilter_IS_NAN,
				},
			}},
		},
	} {
		got, err := test.in.toProto()
		if err != nil {
			t.Fatal(err)
		}
		if !testEqual(got, test.want) {
			t.Errorf("%+v:\ngot\n%v\nwant\n%v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
	}
}
func TestQueryToProto(t *testing.T) {
filtr := func(path []string, op string, val interface{}) *pb.StructuredQuery_Filter {
f, err := filter{path, op, val}.toProto()
@ -88,10 +139,15 @@ func TestQueryToProto(t *testing.T) {
},
},
{
desc: ` q.Where("a", ">", 5)`,
desc: `q.Where("a", ">", 5)`,
in: q.Where("a", ">", 5),
want: &pb.StructuredQuery{Where: filtr([]string{"a"}, ">", 5)},
},
{
desc: `q.Where("a", "==", NaN)`,
in: q.Where("a", "==", float32(math.NaN())),
want: &pb.StructuredQuery{Where: filtr([]string{"a"}, "==", math.NaN())},
},
{
desc: `q.Where("a", ">", 5).Where("b", "<", "foo")`,
in: q.Where("a", ">", 5).Where("b", "<", "foo"),
@ -524,10 +580,10 @@ func TestQueryGetAll(t *testing.T) {
Fields: map[string]*pb.Value{"f": intval(1)},
},
}
wantReadTimes := []*tspb.Timestamp{aTimestamp, aTimestamp2}
srv.addRPC(nil, []interface{}{
&pb.RunQueryResponse{Document: wantPBDocs[0]},
&pb.RunQueryResponse{Document: wantPBDocs[1]},
&pb.RunQueryResponse{Document: wantPBDocs[0], ReadTime: aTimestamp},
&pb.RunQueryResponse{Document: wantPBDocs[1], ReadTime: aTimestamp2},
})
gotDocs, err := c.Collection("C").Documents(ctx).GetAll()
if err != nil {
@ -537,7 +593,7 @@ func TestQueryGetAll(t *testing.T) {
t.Errorf("got %d docs, wanted %d", got, want)
}
for i, got := range gotDocs {
want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c)
want, err := newDocumentSnapshot(c.Doc(docNames[i]), wantPBDocs[i], c, wantReadTimes[i])
if err != nil {
t.Fatal(err)
}
@ -549,3 +605,113 @@ func TestQueryGetAll(t *testing.T) {
}
}
}
// TestQueryCompareFunc checks that Query.compareFunc orders document
// snapshots according to the query's OrderBy clauses, and that comparing
// documents missing the ordered field reports an error.
func TestQueryCompareFunc(t *testing.T) {
	// mv builds a document field map from alternating name/value pairs.
	mv := func(fields ...interface{}) map[string]*pb.Value {
		m := map[string]*pb.Value{}
		for i := 0; i < len(fields); i += 2 {
			m[fields[i].(string)] = fields[i+1].(*pb.Value)
		}
		return m
	}
	// snap builds a minimal DocumentSnapshot with the given ref and fields.
	snap := func(ref *DocumentRef, fields map[string]*pb.Value) *DocumentSnapshot {
		return &DocumentSnapshot{Ref: ref, proto: &pb.Document{Fields: fields}}
	}

	c := &Client{}
	coll := c.Collection("C")
	doc1 := coll.Doc("doc1")
	doc2 := coll.Doc("doc2")
	doc3 := coll.Doc("doc3")
	doc4 := coll.Doc("doc4")
	for _, test := range []struct {
		q    Query
		in   []*DocumentSnapshot
		want []*DocumentSnapshot
	}{
		{
			// Ascending by top-level field; equal values appear ordered by
			// document name here (doc2 before doc3) — see want ordering.
			q: coll.OrderBy("foo", Asc),
			in: []*DocumentSnapshot{
				snap(doc3, mv("foo", intval(2))),
				snap(doc4, mv("foo", intval(1))),
				snap(doc2, mv("foo", intval(2))),
			},
			want: []*DocumentSnapshot{
				snap(doc4, mv("foo", intval(1))),
				snap(doc2, mv("foo", intval(2))),
				snap(doc3, mv("foo", intval(2))),
			},
		},
		{
			// Descending by top-level field.
			q: coll.OrderBy("foo", Desc),
			in: []*DocumentSnapshot{
				snap(doc3, mv("foo", intval(2))),
				snap(doc4, mv("foo", intval(1))),
				snap(doc2, mv("foo", intval(2))),
			},
			want: []*DocumentSnapshot{
				snap(doc3, mv("foo", intval(2))),
				snap(doc2, mv("foo", intval(2))),
				snap(doc4, mv("foo", intval(1))),
			},
		},
		{
			// Ascending by nested (dotted) field path.
			q: coll.OrderBy("foo.bar", Asc),
			in: []*DocumentSnapshot{
				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
			},
			want: []*DocumentSnapshot{
				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
			},
		},
		{
			// Descending by nested (dotted) field path.
			q: coll.OrderBy("foo.bar", Desc),
			in: []*DocumentSnapshot{
				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
			},
			want: []*DocumentSnapshot{
				snap(doc3, mv("foo", mapval(mv("bar", intval(2))))),
				snap(doc2, mv("foo", mapval(mv("bar", intval(2))))),
				snap(doc1, mv("foo", mapval(mv("bar", intval(1))))),
			},
		},
	} {
		// Sort a copy of the input with the query's compare function.
		got := append([]*DocumentSnapshot(nil), test.in...)
		sort.Sort(byQuery{test.q.compareFunc(), got})
		if diff := testDiff(got, test.want); diff != "" {
			t.Errorf("%+v: %s", test.q, diff)
		}
	}

	// Want error on missing field.
	q := coll.OrderBy("bar", Asc)
	if q.err != nil {
		t.Fatalf("bad query: %v", q.err)
	}
	cf := q.compareFunc()
	s := snap(doc1, mv("foo", intval(1)))
	if _, err := cf(s, s); err == nil {
		t.Error("got nil, want error")
	}
}
// byQuery implements sort.Interface over DocumentSnapshots using a query's
// comparison function. It is a test helper: comparison errors panic.
type byQuery struct {
	// compare orders two snapshots, as produced by Query.compareFunc.
	compare func(d1, d2 *DocumentSnapshot) (int, error)
	docs    []*DocumentSnapshot
}

func (b byQuery) Len() int      { return len(b.docs) }
func (b byQuery) Swap(i, j int) { b.docs[i], b.docs[j] = b.docs[j], b.docs[i] }
func (b byQuery) Less(i, j int) bool {
	c, err := b.compare(b.docs[i], b.docs[j])
	if err != nil {
		// sort.Interface cannot report errors; surface them loudly in tests.
		panic(err)
	}
	return c < 0
}

@ -1 +1 @@
SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 3047565564b81726a57d7db719704ea8bf17a9ab
SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/test-suite.binproto)= 1502b0250a2ecd854b80509e3e456e46ade89ea7

@ -0,0 +1,19 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
# You can only compare NaN for equality.
description: "query: where clause with non-== comparison with NaN"
query: <
coll_path: "projects/projectID/databases/(default)/documents/C"
clauses: <
where: <
path: <
field: "a"
>
op: "<"
json_value: "\"NaN\""
>
>
is_error: true
>

@ -0,0 +1,19 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
# You can only compare Null for equality.
description: "query: where clause with non-== comparison with Null"
query: <
coll_path: "projects/projectID/databases/(default)/documents/C"
clauses: <
where: <
path: <
field: "a"
>
op: ">"
json_value: "null"
>
>
is_error: true
>

@ -0,0 +1,31 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
# A Where clause that tests for equality with NaN results in a unary filter.
description: "query: a Where clause comparing to NaN"
query: <
coll_path: "projects/projectID/databases/(default)/documents/C"
clauses: <
where: <
path: <
field: "a"
>
op: "=="
json_value: "\"NaN\""
>
>
query: <
from: <
collection_id: "C"
>
where: <
unary_filter: <
op: IS_NAN
field: <
field_path: "a"
>
>
>
>
>

@ -0,0 +1,31 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
# A Where clause that tests for equality with null results in a unary filter.
description: "query: a Where clause comparing to null"
query: <
coll_path: "projects/projectID/databases/(default)/documents/C"
clauses: <
where: <
path: <
field: "a"
>
op: "=="
json_value: "null"
>
>
query: <
from: <
collection_id: "C"
>
where: <
unary_filter: <
op: IS_NULL
field: <
field_path: "a"
>
>
>
>
>

@ -0,0 +1,47 @@
# DO NOT MODIFY. This file was generated by
# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
# If one nested field is deleted, and another isn't, preserve the second.
description: "update-paths: field paths with delete"
update_paths: <
doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
field_paths: <
field: "foo"
field: "bar"
>
field_paths: <
field: "foo"
field: "delete"
>
json_values: "1"
json_values: "\"Delete\""
request: <
database: "projects/projectID/databases/(default)"
writes: <
update: <
name: "projects/projectID/databases/(default)/documents/C/d"
fields: <
key: "foo"
value: <
map_value: <
fields: <
key: "bar"
value: <
integer_value: 1
>
>
>
>
>
>
update_mask: <
field_paths: "foo.bar"
field_paths: "foo.delete"
>
current_document: <
exists: true
>
>
>
>

@ -22,6 +22,7 @@ import (
"cloud.google.com/go/internal/fields"
"github.com/golang/protobuf/ptypes"
ts "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/genproto/googleapis/type/latlng"
)
@ -29,10 +30,11 @@ import (
var nullValue = &pb.Value{&pb.Value_NullValue{}}
var (
typeOfByteSlice = reflect.TypeOf([]byte{})
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil))
typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil))
typeOfByteSlice = reflect.TypeOf([]byte{})
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfLatLng = reflect.TypeOf((*latlng.LatLng)(nil))
typeOfDocumentRef = reflect.TypeOf((*DocumentRef)(nil))
typeOfProtoTimestamp = reflect.TypeOf((*ts.Timestamp)(nil))
)
// toProtoValue converts a Go value to a Firestore Value protobuf.
@ -64,6 +66,12 @@ func toProtoValue(v reflect.Value) (pbv *pb.Value, sawServerTimestamp bool, err
return nil, false, err
}
return &pb.Value{&pb.Value_TimestampValue{ts}}, false, nil
case *ts.Timestamp:
if x == nil {
// gRPC doesn't like nil oneofs. Use NullValue.
return nullValue, false, nil
}
return &pb.Value{&pb.Value_TimestampValue{x}}, false, nil
case *latlng.LatLng:
if x == nil {
// gRPC doesn't like nil oneofs. Use NullValue.
@ -240,7 +248,7 @@ func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, e
// isLeafType determines whether or not a type is a 'leaf type'
// and should not be recursed into, but considered one field.
func isLeafType(t reflect.Type) bool {
return t == typeOfGoTime || t == typeOfLatLng
return t == typeOfGoTime || t == typeOfLatLng || t == typeOfProtoTimestamp
}
var fieldCache = fields.NewCache(parseTag, nil, isLeafType)

@ -20,54 +20,58 @@ import (
"testing"
"time"
ts "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
"google.golang.org/genproto/googleapis/type/latlng"
)
type testStruct1 struct {
B bool
I int
U uint32
F float64
S string
Y []byte
T time.Time
G *latlng.LatLng
L []int
M map[string]int
P *int
B bool
I int
U uint32
F float64
S string
Y []byte
T time.Time
Ts *ts.Timestamp
G *latlng.LatLng
L []int
M map[string]int
P *int
}
var (
p = new(int)
testVal1 = testStruct1{
B: true,
I: 1,
U: 2,
F: 3.0,
S: "four",
Y: []byte{5},
T: tm,
G: ll,
L: []int{6},
M: map[string]int{"a": 7},
P: p,
B: true,
I: 1,
U: 2,
F: 3.0,
S: "four",
Y: []byte{5},
T: tm,
Ts: ptm,
G: ll,
L: []int{6},
M: map[string]int{"a": 7},
P: p,
}
mapVal1 = mapval(map[string]*pb.Value{
"B": boolval(true),
"I": intval(1),
"U": intval(2),
"F": floatval(3),
"S": &pb.Value{&pb.Value_StringValue{"four"}},
"Y": bytesval([]byte{5}),
"T": tsval(tm),
"G": geoval(ll),
"L": arrayval(intval(6)),
"M": mapval(map[string]*pb.Value{"a": intval(7)}),
"P": intval(8),
"B": boolval(true),
"I": intval(1),
"U": intval(2),
"F": floatval(3),
"S": &pb.Value{&pb.Value_StringValue{"four"}},
"Y": bytesval([]byte{5}),
"T": tsval(tm),
"Ts": &pb.Value{&pb.Value_TimestampValue{ptm}},
"G": geoval(ll),
"L": arrayval(intval(6)),
"M": mapval(map[string]*pb.Value{"a": intval(7)}),
"P": intval(8),
})
)
@ -81,6 +85,7 @@ func TestToProtoValue(t *testing.T) {
{[]int(nil), nullValue},
{map[string]int(nil), nullValue},
{(*testStruct1)(nil), nullValue},
{(*ts.Timestamp)(nil), nullValue},
{(*latlng.LatLng)(nil), nullValue},
{(*DocumentRef)(nil), nullValue},
{true, boolval(true)},
@ -90,6 +95,7 @@ func TestToProtoValue(t *testing.T) {
{"str", strval("str")},
{[]byte{1, 2}, bytesval([]byte{1, 2})},
{tm, tsval(tm)},
{ptm, &pb.Value{&pb.Value_TimestampValue{ptm}}},
{ll, geoval(ll)},
{[]int{1, 2}, arrayval(intval(1), intval(2))},
{&[]int{1, 2}, arrayval(intval(1), intval(2))},
@ -234,19 +240,21 @@ func TestToProtoValueTags(t *testing.T) {
}
func TestToProtoValueEmbedded(t *testing.T) {
// Embedded time.Time or LatLng should behave like non-embedded.
// Embedded time.Time, LatLng, or Timestamp should behave like non-embedded.
type embed struct {
time.Time
*latlng.LatLng
*ts.Timestamp
}
got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll}))
got, _, err := toProtoValue(reflect.ValueOf(embed{tm, ll, ptm}))
if err != nil {
t.Fatal(err)
}
want := mapval(map[string]*pb.Value{
"Time": tsval(tm),
"LatLng": geoval(ll),
"Time": tsval(tm),
"LatLng": geoval(ll),
"Timestamp": &pb.Value{&pb.Value_TimestampValue{ptm}},
})
if !testEqual(got, want) {
t.Errorf("got %+v, want %+v", got, want)

@ -23,6 +23,7 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Transaction represents a Firestore transaction.
@ -172,7 +173,7 @@ func (c *Client) RunTransaction(ctx context.Context, f func(context.Context, *Tr
}
// Use exponential backoff to avoid contention with other running
// transactions.
if cerr := gax.Sleep(ctx, backoff.Pause()); cerr != nil {
if cerr := sleep(ctx, backoff.Pause()); cerr != nil {
err = cerr
break
}
@ -198,24 +199,21 @@ func (t *Transaction) rollback() {
// Get gets the document in the context of the transaction. The transaction holds a
// pessimistic lock on the returned document.
func (t *Transaction) Get(dr *DocumentRef) (*DocumentSnapshot, error) {
if len(t.writes) > 0 {
t.readAfterWrite = true
return nil, errReadAfterWrite
}
docProto, err := t.c.c.GetDocument(t.ctx, &pb.GetDocumentRequest{
Name: dr.Path,
ConsistencySelector: &pb.GetDocumentRequest_Transaction{t.id},
})
docsnaps, err := t.GetAll([]*DocumentRef{dr})
if err != nil {
return nil, err
}
return newDocumentSnapshot(dr, docProto, t.c)
ds := docsnaps[0]
if !ds.Exists() {
return ds, status.Errorf(codes.NotFound, "%q not found", dr.Path)
}
return ds, nil
}
// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are
// returned in the order of the given DocumentRefs. If a document is not present, the
// corresponding DocumentSnapshot will be nil. The transaction holds a pessimistic
// lock on all of the returned documents.
// corresponding DocumentSnapshot's Exists method will return false. The transaction
// holds a pessimistic lock on all of the returned documents.
func (t *Transaction) GetAll(drs []*DocumentRef) ([]*DocumentSnapshot, error) {
if len(t.writes) > 0 {
t.readAfterWrite = true
@ -238,9 +236,7 @@ func (t *Transaction) Documents(q Queryer) *DocumentIterator {
return &DocumentIterator{err: errReadAfterWrite}
}
return &DocumentIterator{
ctx: t.ctx,
q: q.query(),
tid: t.id,
iter: newQueryDocumentIterator(t.ctx, q.query(), t.id),
}
}

@ -18,6 +18,7 @@ import (
"testing"
"golang.org/x/net/context"
"google.golang.org/grpc/status"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
@ -53,12 +54,16 @@ func TestRunTransaction(t *testing.T) {
Fields: map[string]*pb.Value{"count": intval(1)},
}
srv.addRPC(
&pb.GetDocumentRequest{
Name: db + "/documents/C/a",
ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid},
},
aDoc,
)
&pb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: []string{db + "/documents/C/a"},
ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid},
}, []interface{}{
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{aDoc},
ReadTime: aTimestamp2,
},
})
aDoc2 := &pb.Document{
Name: aDoc.Name,
Fields: map[string]*pb.Value{"count": intval(2)},
@ -112,6 +117,7 @@ func TestRunTransaction(t *testing.T) {
srv.addRPC(commitReq, &pb.CommitResponse{CommitTime: aTimestamp3})
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
it := tx.Documents(c.Collection("C"))
defer it.Stop()
_, err := it.Next()
if err != iterator.Done {
return err
@ -125,7 +131,7 @@ func TestRunTransaction(t *testing.T) {
// Retry entire transaction.
srv.reset()
srv.addRPC(beginReq, beginRes)
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
srv.addRPC(commitReq, status.Errorf(codes.Aborted, ""))
srv.addRPC(
&pb.BeginTransactionRequest{
Database: db,
@ -150,14 +156,15 @@ func TestTransactionErrors(t *testing.T) {
c, srv := newMock(t)
var (
tid = []byte{1}
internalErr = grpc.Errorf(codes.Internal, "so sad")
internalErr = status.Errorf(codes.Internal, "so sad")
beginReq = &pb.BeginTransactionRequest{
Database: db,
}
beginRes = &pb.BeginTransactionResponse{Transaction: tid}
getReq = &pb.GetDocumentRequest{
Name: db + "/documents/C/a",
ConsistencySelector: &pb.GetDocumentRequest_Transaction{tid},
getReq = &pb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: []string{db + "/documents/C/a"},
ConsistencySelector: &pb.BatchGetDocumentsRequest_Transaction{tid},
}
rollbackReq = &pb.RollbackRequest{Database: db, Transaction: tid}
commitReq = &pb.CommitRequest{Database: db, Transaction: tid}
@ -189,7 +196,7 @@ func TestTransactionErrors(t *testing.T) {
srv.reset()
srv.addRPC(beginReq, beginRes)
srv.addRPC(getReq, internalErr)
srv.addRPC(rollbackReq, grpc.Errorf(codes.FailedPrecondition, ""))
srv.addRPC(rollbackReq, status.Errorf(codes.FailedPrecondition, ""))
err = c.RunTransaction(ctx, get)
if grpc.Code(err) != codes.Internal {
t.Errorf("got <%v>, want Internal", err)
@ -198,10 +205,15 @@ func TestTransactionErrors(t *testing.T) {
// Commit has a permanent error.
srv.reset()
srv.addRPC(beginReq, beginRes)
srv.addRPC(getReq, &pb.Document{
Name: "projects/projectID/databases/(default)/documents/C/a",
CreateTime: aTimestamp,
UpdateTime: aTimestamp2,
srv.addRPC(getReq, []interface{}{
&pb.BatchGetDocumentsResponse{
Result: &pb.BatchGetDocumentsResponse_Found{&pb.Document{
Name: "projects/projectID/databases/(default)/documents/C/a",
CreateTime: aTimestamp,
UpdateTime: aTimestamp2,
}},
ReadTime: aTimestamp2,
},
})
srv.addRPC(commitReq, internalErr)
err = c.RunTransaction(ctx, get)
@ -231,6 +243,7 @@ func TestTransactionErrors(t *testing.T) {
err = c.RunTransaction(ctx, func(_ context.Context, tx *Transaction) error {
tx.Delete(c.Doc("C/a"))
it := tx.Documents(c.Collection("C").Select("x"))
defer it.Stop()
if _, err := it.Next(); err != iterator.Done {
return err
}
@ -275,7 +288,7 @@ func TestTransactionErrors(t *testing.T) {
// Too many retries.
srv.reset()
srv.addRPC(beginReq, beginRes)
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
srv.addRPC(commitReq, status.Errorf(codes.Aborted, ""))
srv.addRPC(
&pb.BeginTransactionRequest{
Database: db,
@ -287,7 +300,7 @@ func TestTransactionErrors(t *testing.T) {
},
beginRes,
)
srv.addRPC(commitReq, grpc.Errorf(codes.Aborted, ""))
srv.addRPC(commitReq, status.Errorf(codes.Aborted, ""))
srv.addRPC(rollbackReq, &empty.Empty{})
err = c.RunTransaction(ctx, func(context.Context, *Transaction) error { return nil },
MaxAttempts(2))
@ -328,6 +341,7 @@ func TestTransactionErrors(t *testing.T) {
},
func(ctx context.Context) error {
it := c.Collection("C").Documents(ctx)
defer it.Stop()
_, err := it.Next()
return err
},

@ -54,7 +54,7 @@ func mustTimestampProto(t time.Time) *tspb.Timestamp {
var cmpOpts = []cmp.Option{
cmp.AllowUnexported(DocumentRef{}, CollectionRef{}, DocumentSnapshot{},
Query{}, filter{}, order{}, fpv{}),
cmpopts.IgnoreTypes(Client{}),
cmpopts.IgnoreTypes(Client{}, &Client{}),
}
// testEqual implements equality for Firestore tests.
@ -99,7 +99,11 @@ func newMock(t *testing.T) (*Client, *mockServer) {
}
func intval(i int) *pb.Value {
return &pb.Value{&pb.Value_IntegerValue{int64(i)}}
return int64val(int64(i))
}
func int64val(i int64) *pb.Value {
return &pb.Value{&pb.Value_IntegerValue{i}}
}
func boolval(b bool) *pb.Value {

@ -15,9 +15,15 @@
package firestore
import (
"errors"
"fmt"
"io"
"log"
"sort"
"time"
"cloud.google.com/go/internal/btree"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
@ -25,10 +31,39 @@ import (
"google.golang.org/grpc/status"
)
// LogWatchStreams controls whether watch stream status changes are logged.
// This feature is EXPERIMENTAL and may disappear at any time.
var LogWatchStreams bool = false
// DocumentChangeKind describes the kind of change to a document between
// query snapshots.
type DocumentChangeKind int

const (
	// DocumentAdded indicates that the document was added to the results.
	DocumentAdded DocumentChangeKind = iota
	// DocumentRemoved indicates that the document was removed from the results.
	DocumentRemoved
	// DocumentModified indicates that the document's contents changed.
	DocumentModified
)

// A DocumentChange describes the change to a document from one query snapshot to the next.
type DocumentChange struct {
	Kind DocumentChangeKind
	Doc  *DocumentSnapshot
	// The zero-based index of the document in the sequence of query results prior to this change,
	// or -1 if the document was not present.
	OldIndex int
	// The zero-based index of the document in the sequence of query results after this change,
	// or -1 if the document is no longer present.
	NewIndex int
}
// Implementation of realtime updates (a.k.a. watch).
// This code is closely based on the Node.js implementation,
// https://github.com/googleapis/nodejs-firestore/blob/master/src/watch.js.
// The sole target ID for all streams from this client.
const watchTargetID int32 = 'g' + 'o'
var defaultBackoff = gax.Backoff{
// Values from https://github.com/googleapis/nodejs-firestore/blob/master/src/backoff.js.
Initial: 1 * time.Second,
@ -36,21 +71,387 @@ var defaultBackoff = gax.Backoff{
Multiplier: 1.5,
}
// not goroutine-safe
type watchStream struct {
ctx context.Context
c *Client
target *pb.Target // document or query being watched
lc pb.Firestore_ListenClient
backoff gax.Backoff
ctx context.Context
c *Client
lc pb.Firestore_ListenClient // the gRPC stream
target *pb.Target // document or query being watched
backoff gax.Backoff // for stream retries
err error // sticky permanent error
readTime time.Time // time of most recent snapshot
current bool // saw CURRENT, but not RESET; precondition for a snapshot
hasReturned bool // have we returned a snapshot yet?
compare func(a, b *DocumentSnapshot) (int, error) // compare documents according to query
// An ordered tree where DocumentSnapshots are the keys.
docTree *btree.BTree
// Map of document name to DocumentSnapshot for the last returned snapshot.
docMap map[string]*DocumentSnapshot
// Map of document name to DocumentSnapshot for accumulated changes for the current snapshot.
// A nil value means the document was removed.
changeMap map[string]*DocumentSnapshot
}
func newWatchStream(ctx context.Context, c *Client, target *pb.Target) *watchStream {
return &watchStream{
ctx: ctx,
c: c,
target: target,
backoff: defaultBackoff,
// newWatchStreamForDocument creates a watch stream that listens to changes
// on the single document dr.
func newWatchStreamForDocument(ctx context.Context, dr *DocumentRef) *watchStream {
	// A single document is always equal to itself.
	compare := func(_, _ *DocumentSnapshot) (int, error) { return 0, nil }
	return newWatchStream(ctx, dr.Parent.c, compare, &pb.Target{
		TargetType: &pb.Target_Documents{
			Documents: &pb.Target_DocumentsTarget{[]string{dr.Path}},
		},
		TargetId: watchTargetID,
	})
}
// newWatchStreamForQuery creates a watch stream that listens to changes in
// the results of q. It fails if the query cannot be converted to its
// protobuf form.
func newWatchStreamForQuery(ctx context.Context, q Query) (*watchStream, error) {
	qp, err := q.toProto()
	if err != nil {
		return nil, err
	}
	// Documents are ordered by the query's own comparison function.
	return newWatchStream(ctx, q.c, q.compareFunc(), &pb.Target{
		TargetType: &pb.Target_Query{
			Query: &pb.Target_QueryTarget{
				Parent:    q.parentPath,
				QueryType: &pb.Target_QueryTarget_StructuredQuery{qp},
			},
		},
		TargetId: watchTargetID,
	}), nil
}
const btreeDegree = 4
// newWatchStream creates a watch stream on target using client c.
// compare orders DocumentSnapshots; it determines the ordering of docTree
// and therefore of returned snapshots.
func newWatchStream(ctx context.Context, c *Client, compare func(_, _ *DocumentSnapshot) (int, error), target *pb.Target) *watchStream {
	w := &watchStream{
		ctx:       ctx,
		c:         c,
		compare:   compare,
		target:    target,
		backoff:   defaultBackoff,
		docMap:    map[string]*DocumentSnapshot{},
		changeMap: map[string]*DocumentSnapshot{},
	}
	// The btree's less function delegates to compare via w.less, which
	// records any comparison error in w.err.
	w.docTree = btree.New(btreeDegree, func(a, b interface{}) bool {
		return w.less(a.(*DocumentSnapshot), b.(*DocumentSnapshot))
	})
	return w
}
// less reports whether a sorts before b under the stream's comparison
// function. A comparison error is recorded in s.err (which is sticky) and
// treated as "not less", since the btree callback cannot return an error.
func (s *watchStream) less(a, b *DocumentSnapshot) bool {
	cmp, err := s.compare(a, b)
	if err == nil {
		return cmp < 0
	}
	s.err = err
	return false
}
// nextSnapshot blocks until the watched results change, then returns the
// new result tree, the changes since the previous snapshot, and the read
// time of the snapshot.
// Once nextSnapshot returns an error, it will always return the same error.
func (s *watchStream) nextSnapshot() (*btree.BTree, []DocumentChange, time.Time, error) {
	if s.err != nil {
		return nil, nil, time.Time{}, s.err
	}
	var changes []DocumentChange
	for {
		// Process messages until we are in a consistent state.
		for !s.handleNextMessage() {
		}
		if s.err != nil {
			_ = s.close() // ignore error
			return nil, nil, time.Time{}, s.err
		}
		var newDocTree *btree.BTree
		newDocTree, changes = s.computeSnapshot(s.docTree, s.docMap, s.changeMap, s.readTime)
		// computeSnapshot may fail via the btree less callback, which
		// records errors in s.err rather than returning them.
		if s.err != nil {
			return nil, nil, time.Time{}, s.err
		}
		// Only return a snapshot if something has changed, or this is the first snapshot.
		if !s.hasReturned || newDocTree != s.docTree {
			s.docTree = newDocTree
			break
		}
	}
	// Start accumulating changes for the next snapshot.
	s.changeMap = map[string]*DocumentSnapshot{}
	s.hasReturned = true
	return s.docTree, changes, s.readTime, nil
}
// Read a message from the stream and handle it. Return true when
// we're in a consistent state, or there is a permanent error.
// Accumulated document changes are recorded in s.changeMap, with a nil
// value meaning the document was removed from the results.
func (s *watchStream) handleNextMessage() bool {
	res, err := s.recv()
	if err != nil {
		s.err = err
		// Errors returned by recv are permanent.
		return true
	}
	switch r := res.ResponseType.(type) {
	case *pb.ListenResponse_TargetChange:
		return s.handleTargetChange(r.TargetChange)
	case *pb.ListenResponse_DocumentChange:
		name := r.DocumentChange.Document.Name
		s.logf("DocumentChange %q", name)
		if hasWatchTargetID(r.DocumentChange.TargetIds) { // document changed
			ref, err := pathToDoc(name, s.c)
			if err == nil {
				s.changeMap[name], err = newDocumentSnapshot(ref, r.DocumentChange.Document, s.c, nil)
			}
			if err != nil {
				s.err = err
				return true
			}
		} else if hasWatchTargetID(r.DocumentChange.RemovedTargetIds) { // document removed
			s.changeMap[name] = nil
		}
	case *pb.ListenResponse_DocumentDelete:
		s.logf("Delete %q", r.DocumentDelete.Document)
		s.changeMap[r.DocumentDelete.Document] = nil
	case *pb.ListenResponse_DocumentRemove:
		s.logf("Remove %q", r.DocumentRemove.Document)
		s.changeMap[r.DocumentRemove.Document] = nil
	case *pb.ListenResponse_Filter:
		s.logf("Filter %d", r.Filter.Count)
		// The server's count disagreeing with ours means we missed
		// something; drop the results and re-fetch.
		if int(r.Filter.Count) != s.currentSize() {
			s.resetDocs() // Remove all the current results.
			// The filter didn't match; close the stream so it will be re-opened on the next
			// call to nextSnapshot.
			_ = s.close() // ignore error
			s.lc = nil
		}
	default:
		s.err = fmt.Errorf("unknown response type %T", r)
		return true
	}
	return false
}
// Return true iff in a consistent state, or there is a permanent error.
//
// handleTargetChange processes a TargetChange message from the server,
// updating the stream's read time, resume token, and consistency state.
func (s *watchStream) handleTargetChange(tc *pb.TargetChange) bool {
	switch tc.TargetChangeType {
	case pb.TargetChange_NO_CHANGE:
		s.logf("TargetNoChange %d %v", len(tc.TargetIds), tc.ReadTime)
		if len(tc.TargetIds) == 0 && tc.ReadTime != nil && s.current {
			// Everything is up-to-date, so we are ready to return a snapshot.
			rt, err := ptypes.Timestamp(tc.ReadTime)
			if err != nil {
				s.err = err
				return true
			}
			s.readTime = rt
			// Keyed literal keeps `go vet` (composites) clean on the generated type.
			s.target.ResumeType = &pb.Target_ResumeToken{ResumeToken: tc.ResumeToken}
			return true
		}
	case pb.TargetChange_ADD:
		s.logf("TargetAdd")
		if tc.TargetIds[0] != watchTargetID {
			s.err = errors.New("unexpected target ID sent by server")
			return true
		}
	case pb.TargetChange_REMOVE:
		s.logf("TargetRemove")
		// We should never see a remove.
		if tc.Cause != nil {
			s.err = status.Error(codes.Code(tc.Cause.Code), tc.Cause.Message)
		} else {
			s.err = status.Error(codes.Internal, "firestore: client saw REMOVE")
		}
		return true
	// The targets reflect all changes committed before the targets were added
	// to the stream.
	case pb.TargetChange_CURRENT:
		s.logf("TargetCurrent")
		s.current = true
	// The targets have been reset, and a new initial state for the targets will be
	// returned in subsequent changes. Whatever changes have happened so far no
	// longer matter.
	case pb.TargetChange_RESET:
		s.logf("TargetReset")
		s.resetDocs()
	default:
		s.err = fmt.Errorf("firestore: unknown TargetChange type %s", tc.TargetChangeType)
		return true
	}
	// If we see a resume token and our watch ID is affected, we assume the stream
	// is now healthy, so we reset our backoff time to the minimum.
	if tc.ResumeToken != nil && (len(tc.TargetIds) == 0 || hasWatchTargetID(tc.TargetIds)) {
		s.backoff = defaultBackoff
	}
	return false // not in a consistent state, keep receiving
}
// resetDocs discards the stream's resume token and pending changes, and marks
// every currently known document as deleted so a fresh server state can be
// rebuilt from subsequent messages.
func (s *watchStream) resetDocs() {
	s.target.ResumeType = nil // clear resume token
	s.current = false
	s.changeMap = map[string]*DocumentSnapshot{}
	// Mark each document as deleted. If documents are not deleted, they
	// will be sent again by the server.
	it := s.docTree.BeforeIndex(0)
	for it.Next() {
		s.changeMap[it.Key.(*DocumentSnapshot).Ref.Path] = nil
	}
}
// currentSize returns the number of documents the stream currently considers
// present: the known documents plus pending additions minus pending deletions.
func (s *watchStream) currentSize() int {
	_, added, removed := extractChanges(s.docMap, s.changeMap)
	return len(s.docMap) + len(added) - len(removed)
}
// extractChanges splits changeMap into the documents that were modified,
// newly added, or deleted relative to docMap since the last snapshot. A nil
// value in changeMap marks a deletion; deletions of unknown documents are
// ignored.
func extractChanges(docMap, changeMap map[string]*DocumentSnapshot) (updates, adds []*DocumentSnapshot, deletes []string) {
	for name, snap := range changeMap {
		if snap == nil {
			// A deletion only matters if we knew about the document.
			if _, known := docMap[name]; known {
				deletes = append(deletes, name)
			}
			continue
		}
		if docMap[name] != nil {
			updates = append(updates, snap)
		} else {
			adds = append(adds, snap)
		}
	}
	return updates, adds, deletes
}
// assert panics if b is false.
// For development only.
// TODO(jba): remove.
func assert(b bool) {
	if b {
		return
	}
	panic("assertion failed")
}
// Applies the mutations in changeMap to both the document tree and the
// document lookup map. Modifies docMap in place and returns a new docTree.
// If there were no changes, returns docTree unmodified.
//
// Changes are emitted in the order clients expect: removals first, then
// additions, then modifications, each group sorted by the stream's comparison
// function so that OldIndex/NewIndex increase monotonically.
func (s *watchStream) computeSnapshot(docTree *btree.BTree, docMap, changeMap map[string]*DocumentSnapshot, readTime time.Time) (*btree.BTree, []DocumentChange) {
	var changes []DocumentChange
	updatedTree := docTree
	assert(docTree.Len() == len(docMap))
	updates, adds, deletes := extractChanges(docMap, changeMap)
	// Clone lazily: adds/deletes always change the tree, so clone up front;
	// updates clone below only when a document actually changed.
	if len(adds) > 0 || len(deletes) > 0 {
		updatedTree = docTree.Clone()
	}
	// Process the sorted changes in the order that is expected by our clients
	// (removals, additions, and then modifications). We also need to sort the
	// individual changes to assure that oldIndex/newIndex keep incrementing.
	deldocs := make([]*DocumentSnapshot, len(deletes))
	for i, d := range deletes {
		deldocs[i] = docMap[d]
	}
	sort.Sort(byLess{deldocs, s.less})
	for _, oldDoc := range deldocs {
		assert(oldDoc != nil)
		delete(docMap, oldDoc.Ref.Path)
		_, oldi := updatedTree.GetWithIndex(oldDoc)
		// TODO(jba): have btree.Delete return old index
		_, found := updatedTree.Delete(oldDoc)
		assert(found)
		changes = append(changes, DocumentChange{
			Kind:     DocumentRemoved,
			Doc:      oldDoc,
			OldIndex: oldi,
			NewIndex: -1,
		})
	}
	sort.Sort(byLess{adds, s.less})
	for _, newDoc := range adds {
		name := newDoc.Ref.Path
		assert(docMap[name] == nil)
		newDoc.ReadTime = readTime
		docMap[name] = newDoc
		updatedTree.Set(newDoc, nil)
		// TODO(jba): change btree so Set returns index as second value.
		_, newi := updatedTree.GetWithIndex(newDoc)
		changes = append(changes, DocumentChange{
			Kind:     DocumentAdded,
			Doc:      newDoc,
			OldIndex: -1,
			NewIndex: newi,
		})
	}
	sort.Sort(byLess{updates, s.less})
	for _, newDoc := range updates {
		name := newDoc.Ref.Path
		oldDoc := docMap[name]
		assert(oldDoc != nil)
		// An unchanged UpdateTime means the document content is unchanged;
		// skip it so no spurious DocumentModified is emitted.
		if newDoc.UpdateTime.Equal(oldDoc.UpdateTime) {
			continue
		}
		if updatedTree == docTree {
			updatedTree = docTree.Clone()
		}
		newDoc.ReadTime = readTime
		docMap[name] = newDoc
		_, oldi := updatedTree.GetWithIndex(oldDoc)
		updatedTree.Delete(oldDoc)
		updatedTree.Set(newDoc, nil)
		_, newi := updatedTree.GetWithIndex(newDoc)
		changes = append(changes, DocumentChange{
			Kind:     DocumentModified,
			Doc:      newDoc,
			OldIndex: oldi,
			NewIndex: newi,
		})
	}
	assert(updatedTree.Len() == len(docMap))
	return updatedTree, changes
}
// byLess adapts a slice of document snapshots and a less function to
// sort.Interface.
type byLess struct {
	s    []*DocumentSnapshot
	less func(a, b *DocumentSnapshot) bool
}

func (b byLess) Len() int           { return len(b.s) }
func (b byLess) Swap(i, j int)      { b.s[i], b.s[j] = b.s[j], b.s[i] }
func (b byLess) Less(i, j int) bool { return b.less(b.s[i], b.s[j]) }
// hasWatchTargetID reports whether ids contains this client's watch target ID.
func hasWatchTargetID(ids []int32) bool {
	for _, tid := range ids {
		if tid != watchTargetID {
			continue
		}
		return true
	}
	return false
}
// logf writes a message to the standard logger when watch-stream logging is
// enabled via LogWatchStreams.
func (s *watchStream) logf(format string, args ...interface{}) {
	if !LogWatchStreams {
		return
	}
	log.Printf(format, args...)
}
// Close the stream. From this point on, calls to nextSnapshot will return
// io.EOF, or the error from CloseSend.
func (s *watchStream) stop() {
	err := s.close()
	if s.err != nil { // don't change existing error
		return
	}
	if err != nil {
		// Keep the CloseSend error; without this return it would immediately
		// be overwritten by io.EOF below, contradicting the doc comment.
		s.err = err
		return
	}
	s.err = io.EOF // normal shutdown
}
// close half-closes the underlying listen stream, if one is open.
func (s *watchStream) close() error {
	lc := s.lc
	if lc == nil {
		return nil
	}
	return lc.CloseSend()
}
// recv receives the next message from the stream. It also handles opening the stream
@ -71,15 +472,13 @@ func (s *watchStream) recv() (*pb.ListenResponse, error) {
return res, err
}
// Non-permanent error. Sleep and retry.
// TODO: from node:
// request.addTarget.resumeToken = resumeToken;
// changeMap.clear();
s.changeMap = map[string]*DocumentSnapshot{} // clear changeMap
dur := s.backoff.Pause()
// If we're out of quota, wait a long time before retrying.
if status.Code(err) == codes.ResourceExhausted {
dur = s.backoff.Max
}
if err := gax.Sleep(s.ctx, dur); err != nil {
if err := sleep(s.ctx, dur); err != nil {
return nil, err
}
s.lc = nil
@ -87,10 +486,11 @@ func (s *watchStream) recv() (*pb.ListenResponse, error) {
}
func (s *watchStream) open() (pb.Firestore_ListenClient, error) {
lc, err := s.c.c.Listen(s.ctx)
dbPath := s.c.path()
lc, err := s.c.c.Listen(withResourceHeader(s.ctx, dbPath))
if err == nil {
err = lc.Send(&pb.ListenRequest{
Database: s.c.path(),
Database: dbPath,
TargetChange: &pb.ListenRequest_AddTarget{AddTarget: s.target},
})
}
@ -106,7 +506,7 @@ func isPermanentWatchError(err error) bool {
return false
}
switch status.Code(err) {
case codes.Canceled, codes.Unknown, codes.DeadlineExceeded, codes.ResourceExhausted,
case codes.Unknown, codes.DeadlineExceeded, codes.ResourceExhausted,
codes.Internal, codes.Unavailable, codes.Unauthenticated:
return false
default:

@ -15,8 +15,11 @@
package firestore
import (
"sort"
"testing"
"time"
"cloud.google.com/go/internal/btree"
"github.com/golang/protobuf/proto"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
@ -32,7 +35,7 @@ func TestWatchRecv(t *testing.T) {
defaultBackoff = gax.Backoff{Initial: 1, Max: 1, Multiplier: 1}
defer func() { defaultBackoff = db }()
ws := newWatchStream(ctx, c, &pb.Target{})
ws := newWatchStream(ctx, c, nil, &pb.Target{})
request := &pb.ListenRequest{
Database: "projects/projectID/databases/(default)",
TargetChange: &pb.ListenRequest_AddTarget{&pb.Target{}},
@ -64,3 +67,260 @@ func TestWatchRecv(t *testing.T) {
t.Fatalf("got %s, want %s", got, want)
}
}
func TestComputeSnapshot(t *testing.T) {
c := &Client{
projectID: "projID",
databaseID: "(database)",
}
ws := newWatchStream(context.Background(), c, nil, &pb.Target{})
tm := time.Now()
i := 0
doc := func(path, value string) *DocumentSnapshot {
i++
return &DocumentSnapshot{
Ref: c.Doc(path),
proto: &pb.Document{Fields: map[string]*pb.Value{"foo": strval(value)}},
UpdateTime: tm.Add(time.Duration(i) * time.Second), // need unique time for updates
}
}
val := func(d *DocumentSnapshot) string { return d.proto.Fields["foo"].GetStringValue() }
less := func(a, b *DocumentSnapshot) bool { return val(a) < val(b) }
type dmap map[string]*DocumentSnapshot
ds1 := doc("C/d1", "a")
ds2 := doc("C/d2", "b")
ds2c := doc("C/d2", "c")
docTree := btree.New(4, func(a, b interface{}) bool { return less(a.(*DocumentSnapshot), b.(*DocumentSnapshot)) })
var gotChanges []DocumentChange
docMap := dmap{}
// The following test cases are not independent; each builds on the output of the previous.
for _, test := range []struct {
desc string
changeMap dmap
wantDocs []*DocumentSnapshot
wantChanges []DocumentChange
}{
{
"no changes",
nil,
nil,
nil,
},
{
"add a doc",
dmap{ds1.Ref.Path: ds1},
[]*DocumentSnapshot{ds1},
[]DocumentChange{{Kind: DocumentAdded, Doc: ds1, OldIndex: -1, NewIndex: 0}},
},
{
"add, remove",
dmap{ds1.Ref.Path: nil, ds2.Ref.Path: ds2},
[]*DocumentSnapshot{ds2},
[]DocumentChange{
{Kind: DocumentRemoved, Doc: ds1, OldIndex: 0, NewIndex: -1},
{Kind: DocumentAdded, Doc: ds2, OldIndex: -1, NewIndex: 0},
},
},
{
"add back, modify",
dmap{ds1.Ref.Path: ds1, ds2c.Ref.Path: ds2c},
[]*DocumentSnapshot{ds1, ds2c},
[]DocumentChange{
{Kind: DocumentAdded, Doc: ds1, OldIndex: -1, NewIndex: 0},
{Kind: DocumentModified, Doc: ds2c, OldIndex: 1, NewIndex: 1},
},
},
} {
docTree, gotChanges = ws.computeSnapshot(docTree, docMap, test.changeMap, time.Time{})
gotDocs := treeDocs(docTree)
if diff := testDiff(gotDocs, test.wantDocs); diff != "" {
t.Fatalf("%s: %s", test.desc, diff)
}
mgot := mapDocs(docMap, less)
if diff := testDiff(gotDocs, mgot); diff != "" {
t.Fatalf("%s: docTree and docMap disagree: %s", test.desc, diff)
}
if diff := testDiff(gotChanges, test.wantChanges); diff != "" {
t.Fatalf("%s: %s", test.desc, diff)
}
}
// Verify that if there are no changes, the returned docTree is identical to the first arg.
// docTree already has ds2c.
got, _ := ws.computeSnapshot(docTree, docMap, dmap{ds2c.Ref.Path: ds2c}, time.Time{})
if got != docTree {
t.Error("returned docTree != arg docTree")
}
}
// treeDocs returns the document snapshots stored in bt, in tree order.
func treeDocs(bt *btree.BTree) []*DocumentSnapshot {
	var snaps []*DocumentSnapshot
	for it := bt.BeforeIndex(0); it.Next(); {
		snaps = append(snaps, it.Key.(*DocumentSnapshot))
	}
	return snaps
}
// mapDocs returns m's values sorted by less.
func mapDocs(m map[string]*DocumentSnapshot, less func(a, b *DocumentSnapshot) bool) []*DocumentSnapshot {
	var snaps []*DocumentSnapshot
	for _, snap := range m {
		snaps = append(snaps, snap)
	}
	sort.Sort(byLess{snaps, less})
	return snaps
}
func TestWatchStream(t *testing.T) {
// Preliminary, very basic tests. Will expand and turn into cross-language tests
// later.
ctx := context.Background()
c, srv := newMock(t)
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
baseTime := time.Now()
readTime := baseTime.Add(5 * time.Second)
readTimestamp := mustTimestampProto(readTime)
doc := func(path string, value int, tm time.Time) *DocumentSnapshot {
ref := c.Doc(path)
ts := mustTimestampProto(tm)
return &DocumentSnapshot{
Ref: ref,
proto: &pb.Document{
Name: ref.Path,
Fields: map[string]*pb.Value{"foo": intval(value)},
CreateTime: ts,
UpdateTime: ts,
},
CreateTime: tm,
UpdateTime: tm,
ReadTime: readTime,
}
}
change := func(ds *DocumentSnapshot) *pb.ListenResponse {
return &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentChange{&pb.DocumentChange{
Document: ds.proto,
TargetIds: []int32{watchTargetID},
}}}
}
del := func(ds *DocumentSnapshot) *pb.ListenResponse {
return &pb.ListenResponse{ResponseType: &pb.ListenResponse_DocumentDelete{&pb.DocumentDelete{
Document: ds.Ref.Path,
}}}
}
q := Query{c: c, collectionID: "x"}
current := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{
TargetChangeType: pb.TargetChange_CURRENT,
}}}
noChange := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{
TargetChangeType: pb.TargetChange_NO_CHANGE,
ReadTime: readTimestamp,
}}}
doc1 := doc("C/d1", 1, baseTime)
doc1a := doc("C/d1", 2, baseTime.Add(time.Second))
doc2 := doc("C/d2", 3, baseTime)
for _, test := range []struct {
desc string
responses []interface{}
want []*DocumentSnapshot
}{
{
"no changes: empty btree",
[]interface{}{current, noChange},
nil,
},
{
"add a doc",
[]interface{}{change(doc1), current, noChange},
[]*DocumentSnapshot{doc1},
},
{
"add a doc, then remove it",
[]interface{}{change(doc1), del(doc1), current, noChange},
[]*DocumentSnapshot(nil),
},
{
"add a doc, then add another one",
[]interface{}{change(doc1), change(doc2), current, noChange},
[]*DocumentSnapshot{doc1, doc2},
},
{
"add a doc, then change it",
[]interface{}{change(doc1), change(doc1a), current, noChange},
[]*DocumentSnapshot{doc1a},
},
} {
ws, err := newWatchStreamForQuery(ctx, q)
if err != nil {
t.Fatal(err)
}
request := &pb.ListenRequest{
Database: "projects/projectID/databases/(default)",
TargetChange: &pb.ListenRequest_AddTarget{ws.target},
}
srv.addRPC(request, test.responses)
tree, _, _, err := ws.nextSnapshot()
if err != nil {
t.Fatalf("%s: %v", test.desc, err)
}
got := treeDocs(tree)
if diff := testDiff(got, test.want); diff != "" {
t.Errorf("%s: %s", test.desc, diff)
}
}
}
func TestWatchCancel(t *testing.T) {
	// Canceling the context of a watch should result in a codes.Canceled error from the next
	// call to the iterator's Next method.
	ctx := context.Background()
	c, srv := newMock(t)
	q := Query{c: c, collectionID: "x"}

	// Cancel before open.
	ctx2, cancel := context.WithCancel(ctx)
	ws, err := newWatchStreamForQuery(ctx2, q)
	if err != nil {
		t.Fatal(err)
	}
	cancel()
	_, _, _, err = ws.nextSnapshot()
	codeEq(t, "cancel before open", codes.Canceled, err)

	request := &pb.ListenRequest{
		Database:     "projects/projectID/databases/(default)",
		TargetChange: &pb.ListenRequest_AddTarget{ws.target},
	}
	current := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{
		TargetChangeType: pb.TargetChange_CURRENT,
	}}}
	noChange := &pb.ListenResponse{ResponseType: &pb.ListenResponse_TargetChange{&pb.TargetChange{
		TargetChangeType: pb.TargetChange_NO_CHANGE,
		ReadTime:         aTimestamp,
	}}}

	// Cancel from gax.Sleep. We should still see a gRPC error with codes.Canceled, not a
	// context.Canceled error.
	ctx2, cancel = context.WithCancel(ctx)
	ws, err = newWatchStreamForQuery(ctx2, q)
	if err != nil {
		t.Fatal(err)
	}
	srv.addRPC(request, []interface{}{current, noChange})
	// Drain the first (successful) snapshot before canceling.
	_, _, _, _ = ws.nextSnapshot()
	cancel()
	// Because of how the mock works, the following results in an EOF on the stream, which
	// is a non-permanent error that causes a retry. That retry ends up in gax.Sleep, which
	// finds that the context is done and returns ctx.Err(), which is context.Canceled.
	// Verify that we transform that context.Canceled into a gRPC Status with code Canceled.
	_, _, _, err = ws.nextSnapshot()
	codeEq(t, "cancel from gax.Sleep", codes.Canceled, err)
	// TODO(jba): Test that we get codes.Canceled when canceling an RPC.
	// We had a test for this in a21236af, but it was flaky for unclear reasons.
}

11
vendor/cloud.google.com/go/internal/btree/README.md generated vendored Normal file

@ -0,0 +1,11 @@
This package is a fork of github.com/jba/btree at commit
d4edd57f39b8425fc2c631047ff4dc6024d82a4f, which itself was a fork of
github.com/google/btree at 316fb6d3f031ae8f4d457c6c5186b9e3ded70435.
This directory makes the following modifications:
- Updated copyright notice.
- Removed LICENSE (it is the same as the repo-wide license, Apache 2.0).
- Removed examples_test.go and .travis.yml.
- Added this file.

@ -0,0 +1,268 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.7
package btree
import (
"fmt"
"sort"
"testing"
)
const benchmarkTreeSize = 10000
var degrees = []int{2, 8, 32, 64}
func BenchmarkInsert(b *testing.B) {
insertP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
i := 0
for i < b.N {
tr := New(d, less)
for _, m := range insertP {
tr.Set(m.Key, m.Value)
i++
if i >= b.N {
return
}
}
}
})
}
}
func BenchmarkDeleteInsert(b *testing.B) {
insertP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
tr := New(d, less)
for _, m := range insertP {
tr.Set(m.Key, m.Value)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := insertP[i%benchmarkTreeSize]
tr.Delete(m.Key)
tr.Set(m.Key, m.Value)
}
})
}
}
func BenchmarkDeleteInsertCloneOnce(b *testing.B) {
insertP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
tr := New(d, less)
for _, m := range insertP {
tr.Set(m.Key, m.Value)
}
tr = tr.Clone()
b.ResetTimer()
for i := 0; i < b.N; i++ {
m := insertP[i%benchmarkTreeSize]
tr.Delete(m.Key)
tr.Set(m.Key, m.Value)
}
})
}
}
func BenchmarkDeleteInsertCloneEachTime(b *testing.B) {
insertP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
tr := New(d, less)
for _, m := range insertP {
tr.Set(m.Key, m.Value)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
tr = tr.Clone()
m := insertP[i%benchmarkTreeSize]
tr.Delete(m.Key)
tr.Set(m.Key, m.Value)
}
})
}
}
func BenchmarkDelete(b *testing.B) {
insertP := perm(benchmarkTreeSize)
removeP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
i := 0
for i < b.N {
b.StopTimer()
tr := New(d, less)
for _, v := range insertP {
tr.Set(v.Key, v.Value)
}
b.StartTimer()
for _, m := range removeP {
tr.Delete(m.Key)
i++
if i >= b.N {
return
}
}
if tr.Len() > 0 {
panic(tr.Len())
}
}
})
}
}
func BenchmarkGet(b *testing.B) {
insertP := perm(benchmarkTreeSize)
getP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
i := 0
for i < b.N {
b.StopTimer()
tr := New(d, less)
for _, v := range insertP {
tr.Set(v.Key, v.Value)
}
b.StartTimer()
for _, m := range getP {
tr.Get(m.Key)
i++
if i >= b.N {
return
}
}
}
})
}
}
func BenchmarkGetWithIndex(b *testing.B) {
insertP := perm(benchmarkTreeSize)
getP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
i := 0
for i < b.N {
b.StopTimer()
tr := New(d, less)
for _, v := range insertP {
tr.Set(v.Key, v.Value)
}
b.StartTimer()
for _, m := range getP {
tr.GetWithIndex(m.Key)
i++
if i >= b.N {
return
}
}
}
})
}
}
func BenchmarkGetCloneEachTime(b *testing.B) {
insertP := perm(benchmarkTreeSize)
getP := perm(benchmarkTreeSize)
for _, d := range degrees {
b.Run(fmt.Sprintf("degree=%d", d), func(b *testing.B) {
i := 0
for i < b.N {
b.StopTimer()
tr := New(d, less)
for _, m := range insertP {
tr.Set(m.Key, m.Value)
}
b.StartTimer()
for _, m := range getP {
tr = tr.Clone()
tr.Get(m.Key)
i++
if i >= b.N {
return
}
}
}
})
}
}
func BenchmarkFind(b *testing.B) {
for _, d := range degrees {
var items []item
for i := 0; i < 2*d; i++ {
items = append(items, item{i, i})
}
b.Run(fmt.Sprintf("size=%d", len(items)), func(b *testing.B) {
for _, alg := range []struct {
name string
fun func(Key, []item) (int, bool)
}{
{"binary", findBinary},
{"linear", findLinear},
} {
b.Run(alg.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
for j := 0; j < len(items); j++ {
alg.fun(items[j].key, items)
}
}
})
}
})
}
}
// findBinary locates k in the sorted item slice s using binary search.
// It returns the index of k and true if present; otherwise the insertion
// index and false.
func findBinary(k Key, s []item) (int, bool) {
	i := sort.Search(len(s), func(i int) bool { return less(k, s[i].key) })
	// i is the smallest index of s for which key.Less(s[i].Key), or len(s).
	// Bug fix: compare against the item's key, not the item struct itself
	// (matching findLinear); less type-asserts its arguments to int and
	// would panic on an item value.
	if i > 0 && !less(s[i-1].key, k) {
		return i - 1, true
	}
	return i, false
}
// findLinear locates k in the sorted item slice s with a linear scan.
// It returns the index of k and true if present; otherwise the insertion
// index and false.
func findLinear(k Key, s []item) (int, bool) {
	i := 0
	for ; i < len(s); i++ {
		if less(k, s[i].key) {
			break
		}
	}
	if i > 0 && !less(s[i-1].key, k) {
		return i - 1, true
	}
	return i, false
}
// byInts sorts items by their int keys, implementing sort.Interface.
type byInts []item

func (a byInts) Len() int           { return len(a) }
func (a byInts) Less(i, j int) bool { return a[i].key.(int) < a[j].key.(int) }
func (a byInts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

1018
vendor/cloud.google.com/go/internal/btree/btree.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

422
vendor/cloud.google.com/go/internal/btree/btree_test.go generated vendored Normal file

@ -0,0 +1,422 @@
// Copyright 2014 Google Inc.
// Modified 2018 by Jonathan Amsterdam (jbamsterdam@gmail.com)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package btree
import (
"flag"
"fmt"
"math/rand"
"os"
"sort"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
// init seeds math/rand with the current time and prints the seed so failing
// randomized test runs can be reproduced.
func init() {
	seed := time.Now().Unix()
	fmt.Println(seed)
	rand.Seed(seed)
}
// itemWithIndex is a key/value pair together with its expected index in
// sorted order, used to check btree positions.
type itemWithIndex struct {
	Key   Key
	Value Value
	Index int
}
// perm returns a random permutation of n Int items in the range [0, n).
func perm(n int) []itemWithIndex {
	var items []itemWithIndex
	for _, v := range rand.Perm(n) {
		items = append(items, itemWithIndex{Key: v, Value: v, Index: v})
	}
	return items
}
// rang returns an ordered list of Int items in the range [0, n).
func rang(n int) []itemWithIndex {
	var items []itemWithIndex
	for i := 0; i < n; i++ {
		items = append(items, itemWithIndex{Key: i, Value: i, Index: i})
	}
	return items
}
// all extracts all items from an iterator.
func all(it *Iterator) []itemWithIndex {
	var items []itemWithIndex
	for it.Next() {
		items = append(items, itemWithIndex{Key: it.Key, Value: it.Value, Index: it.Index})
	}
	return items
}
// rangrev returns a reversed ordered list of Int items in the range [0, n).
// (Comment fixed to match the function name, per Go doc convention.)
func rangrev(n int) []itemWithIndex {
	var out []itemWithIndex
	for i := n - 1; i >= 0; i-- {
		out = append(out, itemWithIndex{i, i, i})
	}
	return out
}
// reverse reverses s in place.
func reverse(s []itemWithIndex) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}
var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
func TestBTree(t *testing.T) {
tr := New(*btreeDegree, less)
const treeSize = 10000
for i := 0; i < 10; i++ {
if min, _ := tr.Min(); min != nil {
t.Fatalf("empty min, got %+v", min)
}
if max, _ := tr.Max(); max != nil {
t.Fatalf("empty max, got %+v", max)
}
for _, m := range perm(treeSize) {
if _, ok := tr.Set(m.Key, m.Value); ok {
t.Fatal("set found item", m)
}
}
for _, m := range perm(treeSize) {
_, ok, idx := tr.SetWithIndex(m.Key, m.Value)
if !ok {
t.Fatal("set didn't find item", m)
}
if idx != m.Index {
t.Fatalf("got index %d, want %d", idx, m.Index)
}
}
mink, minv := tr.Min()
if want := 0; mink != want || minv != want {
t.Fatalf("min: want %+v, got %+v, %+v", want, mink, minv)
}
maxk, maxv := tr.Max()
if want := treeSize - 1; maxk != want || maxv != want {
t.Fatalf("max: want %+v, got %+v, %+v", want, maxk, maxv)
}
got := all(tr.BeforeIndex(0))
want := rang(treeSize)
if !cmp.Equal(got, want) {
t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
}
for _, m := range perm(treeSize) {
if _, removed := tr.Delete(m.Key); !removed {
t.Fatalf("didn't find %v", m)
}
}
if got = all(tr.BeforeIndex(0)); len(got) > 0 {
t.Fatalf("some left!: %v", got)
}
}
}
func TestAt(t *testing.T) {
tr := New(*btreeDegree, less)
for _, m := range perm(100) {
tr.Set(m.Key, m.Value)
}
for i := 0; i < tr.Len(); i++ {
gotk, gotv := tr.At(i)
if want := i; gotk != want || gotv != want {
t.Fatalf("At(%d) = (%v, %v), want (%v, %v)", i, gotk, gotv, want, want)
}
}
}
func TestGetWithIndex(t *testing.T) {
tr := New(*btreeDegree, less)
for _, m := range perm(100) {
tr.Set(m.Key, m.Value)
}
for i := 0; i < tr.Len(); i++ {
gotv, goti := tr.GetWithIndex(i)
wantv, wanti := i, i
if gotv != wantv || goti != wanti {
t.Errorf("GetWithIndex(%d) = (%v, %v), want (%v, %v)",
i, gotv, goti, wantv, wanti)
}
}
_, got := tr.GetWithIndex(100)
if want := -1; got != want {
t.Errorf("got %d, want %d", got, want)
}
}
func TestSetWithIndex(t *testing.T) {
tr := New(4, less) // use a small degree to cover more cases
var contents []int
for _, m := range perm(100) {
_, _, idx := tr.SetWithIndex(m.Key, m.Value)
contents = append(contents, m.Index)
sort.Ints(contents)
want := -1
for i, c := range contents {
if c == m.Index {
want = i
break
}
}
if idx != want {
t.Fatalf("got %d, want %d", idx, want)
}
}
}
func TestDeleteMin(t *testing.T) {
tr := New(3, less)
for _, m := range perm(100) {
tr.Set(m.Key, m.Value)
}
var got []itemWithIndex
for i := 0; tr.Len() > 0; i++ {
k, v := tr.DeleteMin()
got = append(got, itemWithIndex{k, v, i})
}
if want := rang(100); !cmp.Equal(got, want) {
t.Fatalf("got: %v\nwant: %v", got, want)
}
}
func TestDeleteMax(t *testing.T) {
tr := New(3, less)
for _, m := range perm(100) {
tr.Set(m.Key, m.Value)
}
var got []itemWithIndex
for tr.Len() > 0 {
k, v := tr.DeleteMax()
got = append(got, itemWithIndex{k, v, tr.Len()})
}
reverse(got)
if want := rang(100); !cmp.Equal(got, want) {
t.Fatalf("got: %v\nwant: %v", got, want)
}
}
func TestIterator(t *testing.T) {
const size = 10
tr := New(2, less)
// Empty tree.
for i, it := range []*Iterator{
tr.BeforeIndex(0),
tr.Before(3),
tr.After(3),
} {
if got, want := it.Next(), false; got != want {
t.Errorf("empty, #%d: got %t, want %t", i, got, want)
}
}
// Root with zero children.
tr.Set(1, nil)
tr.Delete(1)
if !(tr.root != nil && len(tr.root.children) == 0 && len(tr.root.items) == 0) {
t.Fatal("wrong shape tree")
}
for i, it := range []*Iterator{
tr.BeforeIndex(0),
tr.Before(3),
tr.After(3),
} {
if got, want := it.Next(), false; got != want {
t.Errorf("zero root, #%d: got %t, want %t", i, got, want)
}
}
// Tree with size elements.
p := perm(size)
for _, v := range p {
tr.Set(v.Key, v.Value)
}
it := tr.BeforeIndex(0)
got := all(it)
want := rang(size)
if !cmp.Equal(got, want) {
t.Fatalf("got %+v\nwant %+v\n", got, want)
}
for i, w := range want {
it := tr.Before(w.Key)
got = all(it)
wn := want[w.Key.(int):]
if !cmp.Equal(got, wn) {
t.Fatalf("got %+v\nwant %+v\n", got, wn)
}
it = tr.BeforeIndex(i)
got = all(it)
if !cmp.Equal(got, wn) {
t.Fatalf("got %+v\nwant %+v\n", got, wn)
}
it = tr.After(w.Key)
got = all(it)
wn = append([]itemWithIndex(nil), want[:w.Key.(int)+1]...)
reverse(wn)
if !cmp.Equal(got, wn) {
t.Fatalf("got %+v\nwant %+v\n", got, wn)
}
it = tr.AfterIndex(i)
got = all(it)
if !cmp.Equal(got, wn) {
t.Fatalf("got %+v\nwant %+v\n", got, wn)
}
}
// Non-existent keys.
tr = New(2, less)
for _, v := range p {
tr.Set(v.Key.(int)*2, v.Value)
}
// tr has only even keys: 0, 2, 4, ... Iterate from odd keys.
for i := -1; i <= size+1; i += 2 {
it := tr.Before(i)
got := all(it)
var want []itemWithIndex
for j := (i + 1) / 2; j < size; j++ {
want = append(want, itemWithIndex{j * 2, j, j})
}
if !cmp.Equal(got, want) {
tr.print(os.Stdout)
t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want)
}
it = tr.After(i)
got = all(it)
want = nil
for j := (i - 1) / 2; j >= 0; j-- {
want = append(want, itemWithIndex{j * 2, j, j})
}
if !cmp.Equal(got, want) {
t.Fatalf("%d: got %+v\nwant %+v\n", i, got, want)
}
}
}
func TestMixed(t *testing.T) {
// Test random, mixed insertions and deletions.
const maxSize = 1000
tr := New(3, less)
has := map[int]bool{}
for i := 0; i < 10000; i++ {
r := rand.Intn(maxSize)
if r >= tr.Len() {
old, ok := tr.Set(r, r)
if has[r] != ok {
t.Fatalf("%d: has=%t, ok=%t", r, has[r], ok)
}
if ok && old.(int) != r {
t.Fatalf("%d: bad old", r)
}
has[r] = true
if got, want := tr.Get(r), r; got != want {
t.Fatalf("Get(%d) = %d, want %d", r, got, want)
}
} else {
// Expoit random map iteration order.
var d int
for d = range has {
break
}
old, removed := tr.Delete(d)
if !removed {
t.Fatalf("%d not found", d)
}
if old.(int) != d {
t.Fatalf("%d: bad old", d)
}
delete(has, d)
}
}
}
const cloneTestSize = 10000
func cloneTest(t *testing.T, b *BTree, start int, p []itemWithIndex, wg *sync.WaitGroup, treec chan<- *BTree) {
treec <- b
for i := start; i < cloneTestSize; i++ {
b.Set(p[i].Key, p[i].Value)
if i%(cloneTestSize/5) == 0 {
wg.Add(1)
go cloneTest(t, b.Clone(), i+1, p, wg, treec)
}
}
wg.Done()
}
func TestCloneConcurrentOperations(t *testing.T) {
b := New(*btreeDegree, less)
treec := make(chan *BTree)
p := perm(cloneTestSize)
var wg sync.WaitGroup
wg.Add(1)
go cloneTest(t, b, 0, p, &wg, treec)
var trees []*BTree
donec := make(chan struct{})
go func() {
for t := range treec {
trees = append(trees, t)
}
close(donec)
}()
wg.Wait()
close(treec)
<-donec
want := rang(cloneTestSize)
for i, tree := range trees {
if !cmp.Equal(want, all(tree.BeforeIndex(0))) {
t.Errorf("tree %v mismatch", i)
}
}
toRemove := rang(cloneTestSize)[cloneTestSize/2:]
for i := 0; i < len(trees)/2; i++ {
tree := trees[i]
wg.Add(1)
go func() {
for _, m := range toRemove {
tree.Delete(m.Key)
}
wg.Done()
}()
}
wg.Wait()
for i, tree := range trees {
var wantpart []itemWithIndex
if i < len(trees)/2 {
wantpart = want[:cloneTestSize/2]
} else {
wantpart = want
}
if got := all(tree.BeforeIndex(0)); !cmp.Equal(wantpart, got) {
t.Errorf("tree %v mismatch, want %v got %v", i, len(want), len(got))
}
}
}
func less(a, b interface{}) bool { return a.(int) < b.(int) }

37
vendor/cloud.google.com/go/internal/btree/debug.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package btree
import (
"fmt"
"io"
"strings"
)
// print writes a human-readable rendering of the whole tree to w,
// starting at the root with indentation level 0. Debug use only.
func (t *BTree) print(w io.Writer) {
	t.root.print(w, 0)
}
// print writes this node's items, then recursively all of its children,
// to w, indenting one step per tree level. A nil node renders as "<nil>".
// Debug use only.
func (n *node) print(w io.Writer, level int) {
	indent := strings.Repeat(" ", level)
	if n == nil {
		fmt.Fprintf(w, "%s<nil>\n", indent)
		return
	}
	fmt.Fprintf(w, "%s%v\n", indent, n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}

64
vendor/cloud.google.com/go/internal/testutil/go18.go generated vendored Normal file

@ -0,0 +1,64 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package testutil
import (
"log"
"time"
"go.opencensus.io/plugin/ocgrpc"
"go.opencensus.io/stats/view"
"go.opencensus.io/trace"
)
// TestExporter is an OpenCensus trace and stats exporter for use in tests.
// Exported spans accumulate in Spans; each non-empty stats view is offered
// on the Stats channel (see ExportView for the non-blocking semantics).
type TestExporter struct {
	Spans []*trace.SpanData // spans collected by ExportSpan, in arrival order
	Stats chan *view.Data   // unbuffered; receives view data with at least one row
}
// NewTestExporter creates a TestExporter and registers it with the
// OpenCensus view and trace subsystems, sampling every span and reporting
// stats every millisecond. Process-global side effects: callers should
// call Unregister when done.
func NewTestExporter() *TestExporter {
	te := &TestExporter{Stats: make(chan *view.Data)}
	view.RegisterExporter(te)
	view.SetReportingPeriod(time.Millisecond)
	if err := view.Register(ocgrpc.ClientRequestCountView); err != nil {
		// Registration failure makes the exporter useless; abort the test binary.
		log.Fatal(err)
	}
	trace.RegisterExporter(te)
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
	return te
}
// ExportSpan records an exported span in te.Spans. It implements
// trace.Exporter. The append is not synchronized; tests must not read
// Spans while spans may still be exported concurrently.
func (te *TestExporter) ExportSpan(s *trace.SpanData) {
	te.Spans = append(te.Spans, s)
}
// ExportView offers vd on the Stats channel when it contains any rows.
// The send is non-blocking: data is silently dropped if no reader is
// currently waiting. It implements view.Exporter.
func (te *TestExporter) ExportView(vd *view.Data) {
	if len(vd.Rows) > 0 {
		select {
		case te.Stats <- vd:
		default:
		}
	}
}
// Unregister removes the exporter from the global view and trace
// subsystems, undoing the registrations made by NewTestExporter.
func (te *TestExporter) Unregister() {
	view.UnregisterExporter(te)
	trace.UnregisterExporter(te)
}

@ -20,8 +20,9 @@ import (
"net"
"strconv"
grpc "google.golang.org/grpc"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// A Server is an in-process gRPC server, listening on a system-chosen port on
@ -90,7 +91,7 @@ func PageBounds(pageSize int, pageToken string, length int) (from, to int, nextP
if pageToken != "" {
from, err = strconv.Atoi(pageToken)
if err != nil {
return 0, 0, "", grpc.Errorf(codes.InvalidArgument, "bad page token: %v", err)
return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err)
}
if from >= length {
return length, length, "", nil

83
vendor/cloud.google.com/go/internal/trace/go18.go generated vendored Normal file

@ -0,0 +1,83 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
// StartSpan starts an OpenCensus span with the given name and returns the
// derived context. The span handle itself is discarded here; EndSpan
// recovers it later via trace.FromContext.
func StartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}
// EndSpan ends the span carried by ctx (started by StartSpan). If err is
// non-nil, the span's status is set from it first via toStatus.
func EndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(toStatus(err))
	}
	span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status: googleapi HTTP errors are mapped from their HTTP
// status code, gRPC errors keep their canonical code, and any other
// error becomes UNKNOWN with its message preserved.
func toStatus(err error) trace.Status {
	if err2, ok := err.(*googleapi.Error); ok {
		return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
	} else if s, ok := status.FromError(err); ok {
		return trace.Status{Code: int32(s.Code()), Message: s.Message()}
	} else {
		return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
	}
}
// httpStatusCodeToOCCode maps an HTTP status code to the closest
// google.rpc canonical code; anything unmapped becomes UNKNOWN.
//
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
	var c code.Code
	switch httpStatusCode {
	case 200:
		c = code.Code_OK
	case 400:
		c = code.Code_INVALID_ARGUMENT // Could also be Code_OUT_OF_RANGE
	case 401:
		c = code.Code_UNAUTHENTICATED
	case 403:
		c = code.Code_PERMISSION_DENIED
	case 404:
		c = code.Code_NOT_FOUND
	case 409:
		c = code.Code_ALREADY_EXISTS // Could also be Code_ABORTED
	case 429:
		c = code.Code_RESOURCE_EXHAUSTED
	case 499:
		c = code.Code_CANCELLED
	case 500:
		c = code.Code_UNKNOWN // Could also be Code_INTERNAL, Code_DATA_LOSS
	case 501:
		c = code.Code_UNIMPLEMENTED
	case 503:
		c = code.Code_UNAVAILABLE
	case 504:
		c = code.Code_DEADLINE_EXCEEDED
	default:
		c = code.Code_UNKNOWN
	}
	return int32(c)
}

55
vendor/cloud.google.com/go/internal/trace/go18_test.go generated vendored Normal file

@ -0,0 +1,55 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"errors"
"net/http"
"testing"
"cloud.google.com/go/internal/testutil"
octrace "go.opencensus.io/trace"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestToStatus verifies the error-to-OpenCensus-status conversion for the
// three error classes toStatus distinguishes: plain errors (UNKNOWN),
// googleapi HTTP errors (mapped from the HTTP code), and gRPC status
// errors (code preserved).
func TestToStatus(t *testing.T) {
	for _, testcase := range []struct {
		input error
		want  octrace.Status
	}{
		{
			errors.New("some random error"),
			octrace.Status{Code: int32(code.Code_UNKNOWN), Message: "some random error"},
		},
		{
			&googleapi.Error{Code: http.StatusConflict, Message: "some specific googleapi http error"},
			octrace.Status{Code: int32(code.Code_ALREADY_EXISTS), Message: "some specific googleapi http error"},
		},
		{
			status.Error(codes.DataLoss, "some specific grpc error"),
			octrace.Status{Code: int32(code.Code_DATA_LOSS), Message: "some specific grpc error"},
		},
	} {
		got := toStatus(testcase.input)
		if r := testutil.Diff(got, testcase.want); r != "" {
			t.Errorf("got -, want +:\n%s", r)
		}
	}
}

30
vendor/cloud.google.com/go/internal/trace/not_go18.go generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.

// StartSpan is a no-op on pre-1.8 Go: it returns ctx unchanged so
// callers can use the same API regardless of Go version.
func StartSpan(ctx context.Context, _ string) context.Context {
	return ctx
}
// EndSpan is a no-op on pre-1.8 Go, where OpenCensus is unavailable.
func EndSpan(context.Context, error) {
}

17
vendor/cloud.google.com/go/issue_template.md generated vendored Normal file

@ -0,0 +1,17 @@
(delete this for feature requests)
## Client
e.g. PubSub
## Describe Your Environment
e.g. Alpine Docker on GKE
## Expected Behavior
e.g. Messages arrive really fast.
## Actual Behavior
e.g. Messages arrive really slowly.

@ -79,6 +79,12 @@ const (
// DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption.
DefaultBufferedByteLimit = 1 << 30 // 1GiB
// defaultWriteTimeout is the timeout for the underlying write API calls. As
// write API calls are not idempotent, they are not retried on timeout. This
// timeout is to allow clients to degrade gracefully if underlying logging
// service is temporarily impaired for some reason.
defaultWriteTimeout = 10 * time.Minute
)
// For testing:
@ -228,6 +234,7 @@ type Logger struct {
// Options
commonResource *mrpb.MonitoredResource
commonLabels map[string]string
writeTimeout time.Duration
}
// A LoggerOption is a configuration option for a Logger.
@ -406,10 +413,8 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger {
logName: internal.LogPath(c.parent, logID),
commonResource: r,
}
// TODO(jba): determine the right context for the bundle handler.
ctx := context.TODO()
l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) {
l.writeLogEntries(ctx, entries.([]*logpb.LogEntry))
l.writeLogEntries(entries.([]*logpb.LogEntry))
})
l.bundler.DelayThreshold = DefaultDelayThreshold
l.bundler.BundleCountThreshold = DefaultEntryCountThreshold
@ -744,13 +749,15 @@ func (l *Logger) Flush() error {
return l.client.extractErrorInfo()
}
func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) {
func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) {
req := &logpb.WriteLogEntriesRequest{
LogName: l.logName,
Resource: l.commonResource,
Labels: l.commonLabels,
Entries: entries,
}
ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
defer cancel()
_, err := l.client.client.WriteLogEntries(ctx, req)
if err != nil {
l.client.error(err)

@ -28,13 +28,13 @@ import (
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go"
"github.com/googleapis/gax-go"
"google.golang.org/grpc/status"
"golang.org/x/net/context"
autogen "cloud.google.com/go/longrunning/autogen"
pb "google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
@ -108,7 +108,7 @@ func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.C
switch r := op.proto.Result.(type) {
case *pb.Operation_Error:
// TODO (pongad): r.Details may contain further information
return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message)
return status.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message)
case *pb.Operation_Response:
if resp == nil {
return nil

@ -0,0 +1,274 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package monitoring
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
// Each field holds the gax call options applied to the client method of the
// same name; see defaultAlertPolicyCallOptions for the defaults.
type AlertPolicyCallOptions struct {
	ListAlertPolicies []gax.CallOption
	GetAlertPolicy    []gax.CallOption
	CreateAlertPolicy []gax.CallOption
	DeleteAlertPolicy []gax.CallOption
	UpdateAlertPolicy []gax.CallOption
}
// defaultAlertPolicyClientOptions returns the dial options used when the
// caller supplies none: the production monitoring endpoint and the
// default auth scopes.
func defaultAlertPolicyClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("monitoring.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultAlertPolicyCallOptions returns the per-method retry settings.
// Idempotent methods retry on DeadlineExceeded/Unavailable with
// exponential backoff. Note the map has no "non_idempotent" key, so
// Create/Update deliberately receive a nil (no-retry) option slice.
func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &AlertPolicyCallOptions{
		ListAlertPolicies: retry[[2]string{"default", "idempotent"}],
		GetAlertPolicy:    retry[[2]string{"default", "idempotent"}],
		CreateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
		DeleteAlertPolicy: retry[[2]string{"default", "idempotent"}],
		UpdateAlertPolicy: retry[[2]string{"default", "non_idempotent"}],
	}
}
// AlertPolicyClient is a client for interacting with Stackdriver Monitoring API.
// It wraps a gRPC connection and the generated service stub; construct it
// with NewAlertPolicyClient and release resources with Close.
type AlertPolicyClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	alertPolicyClient monitoringpb.AlertPolicyServiceClient

	// The call options for this service.
	CallOptions *AlertPolicyCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewAlertPolicyClient creates a new alert policy service client.
//
// The AlertPolicyService API is used to manage (list, create, delete,
// edit) alert policies in Stackdriver Monitoring. An alerting policy is
// a description of the conditions under which some aspect of your
// system is considered to be "unhealthy" and the ways to notify
// people or services about this state. In addition to using this API, alert
// policies can also be managed through
// Stackdriver Monitoring (at https://cloud.google.com/monitoring/docs/),
// which can be reached by clicking the "Monitoring" tab in
// Cloud Console (at https://console.cloud.google.com/).
func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
	// Caller-supplied options come last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultAlertPolicyClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &AlertPolicyClient{
		conn:        conn,
		CallOptions: defaultAlertPolicyCallOptions(),

		alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared by all calls made through this client.
func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. The client is unusable afterwards.
func (c *AlertPolicyClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
	// Caller-supplied pairs are inserted between the gl-go entry and the
	// gapic/gax/grpc version entries.
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ListAlertPolicies lists the existing alerting policies for the project.
// The returned iterator fetches pages lazily as Next is called.
func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full slice expression caps capacity so appending opts cannot
	// overwrite the shared default call-option slice.
	opts = append(c.CallOptions.ListAlertPolicies[0:len(c.CallOptions.ListAlertPolicies):len(c.CallOptions.ListAlertPolicies)], opts...)
	it := &AlertPolicyIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
		var resp *monitoringpb.ListAlertPoliciesResponse
		req.PageToken = pageToken
		// The proto PageSize field is int32; clamp oversized requests.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.alertPolicyClient.ListAlertPolicies(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.AlertPolicies, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator.PageInfo contract,
	// buffering results in it.items.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetAlertPolicy gets a single alerting policy.
// The request is retried per the GetAlertPolicy call options.
func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetAlertPolicy[0:len(c.CallOptions.GetAlertPolicy):len(c.CallOptions.GetAlertPolicy)], opts...)
	var resp *monitoringpb.AlertPolicy
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.alertPolicyClient.GetAlertPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateAlertPolicy creates a new alerting policy.
// The call is not retried by default (no non_idempotent retry options).
func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateAlertPolicy[0:len(c.CallOptions.CreateAlertPolicy):len(c.CallOptions.CreateAlertPolicy)], opts...)
	var resp *monitoringpb.AlertPolicy
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.alertPolicyClient.CreateAlertPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteAlertPolicy deletes an alerting policy.
// Unlike the other methods it returns no payload, only an error.
func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteAlertPolicy[0:len(c.CallOptions.DeleteAlertPolicy):len(c.CallOptions.DeleteAlertPolicy)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The returned Empty message is discarded; only the error matters.
		_, err = c.alertPolicyClient.DeleteAlertPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
// a new one or replace only certain fields in the current alerting policy by
// specifying the fields to be updated via updateMask. Returns the
// updated alerting policy.
func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateAlertPolicy[0:len(c.CallOptions.UpdateAlertPolicy):len(c.CallOptions.UpdateAlertPolicy)], opts...)
	var resp *monitoringpb.AlertPolicy
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.alertPolicyClient.UpdateAlertPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
// It buffers one fetched page at a time in items.
type AlertPolicyIterator struct {
	items    []*monitoringpb.AlertPolicy // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items; set by iterator.NewPageInfo

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
	var item *monitoringpb.AlertPolicy
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// nextFunc (from iterator.NewPageInfo) refilled items; pop the head.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// bufLen reports how many fetched results are currently buffered;
// used by the iterator's PageInfo machinery.
func (it *AlertPolicyIterator) bufLen() int {
	return len(it.items)
}
// takeBuf hands the buffered results to the PageInfo machinery and
// clears the buffer.
func (it *AlertPolicyIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

@ -0,0 +1,128 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package monitoring_test
import (
"cloud.google.com/go/monitoring/apiv3"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)
// ExampleNewAlertPolicyClient demonstrates constructing the client.
func ExampleNewAlertPolicyClient() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleAlertPolicyClient_ListAlertPolicies demonstrates iterating all
// alert policies page by page until iterator.Done.
func ExampleAlertPolicyClient_ListAlertPolicies() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.ListAlertPoliciesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListAlertPolicies(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleAlertPolicyClient_GetAlertPolicy demonstrates fetching one policy.
func ExampleAlertPolicyClient_GetAlertPolicy() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.GetAlertPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetAlertPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleAlertPolicyClient_CreateAlertPolicy demonstrates creating a policy.
func ExampleAlertPolicyClient_CreateAlertPolicy() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.CreateAlertPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateAlertPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleAlertPolicyClient_DeleteAlertPolicy demonstrates deleting a
// policy; the call returns only an error.
func ExampleAlertPolicyClient_DeleteAlertPolicy() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.DeleteAlertPolicyRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteAlertPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleAlertPolicyClient_UpdateAlertPolicy demonstrates updating a policy.
func ExampleAlertPolicyClient_UpdateAlertPolicy() {
	ctx := context.Background()
	c, err := monitoring.NewAlertPolicyClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.UpdateAlertPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateAlertPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

@ -48,6 +48,81 @@ var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
// mockAlertPolicyServer is an in-process fake of the AlertPolicyService.
// Every handler records the request in reqs, rejects calls missing the
// gl-go entry in the x-goog-api-client header, and then either returns
// the configured err or the first canned response in resps.
type mockAlertPolicyServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	monitoringpb.AlertPolicyServiceServer

	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}

// ListAlertPolicies records the request and returns the canned list response.
func (s *mockAlertPolicyServer) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest) (*monitoringpb.ListAlertPoliciesResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Verify the client attached its version metadata (set by setGoogleClientInfo).
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.ListAlertPoliciesResponse), nil
}

// GetAlertPolicy records the request and returns the canned policy.
func (s *mockAlertPolicyServer) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.AlertPolicy), nil
}

// CreateAlertPolicy records the request and returns the canned policy.
func (s *mockAlertPolicyServer) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.AlertPolicy), nil
}

// DeleteAlertPolicy records the request and returns the canned Empty.
func (s *mockAlertPolicyServer) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest) (*emptypb.Empty, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*emptypb.Empty), nil
}

// UpdateAlertPolicy records the request and returns the canned policy.
func (s *mockAlertPolicyServer) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest) (*monitoringpb.AlertPolicy, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.AlertPolicy), nil
}
type mockGroupServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
@ -246,6 +321,105 @@ func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoring
return s.resps[0].(*emptypb.Empty), nil
}
// mockNotificationChannelServer is an in-process fake of the
// NotificationChannelService, following the same pattern as the other
// mocks: record the request, require the gl-go client header, then
// return either the configured err or the first canned response.
type mockNotificationChannelServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	monitoringpb.NotificationChannelServiceServer

	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}

// ListNotificationChannelDescriptors records the request and returns the canned response.
func (s *mockNotificationChannelServer) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest) (*monitoringpb.ListNotificationChannelDescriptorsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Verify the client attached its version metadata (set by setGoogleClientInfo).
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.ListNotificationChannelDescriptorsResponse), nil
}

// GetNotificationChannelDescriptor records the request and returns the canned descriptor.
func (s *mockNotificationChannelServer) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest) (*monitoringpb.NotificationChannelDescriptor, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.NotificationChannelDescriptor), nil
}

// ListNotificationChannels records the request and returns the canned list response.
func (s *mockNotificationChannelServer) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest) (*monitoringpb.ListNotificationChannelsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.ListNotificationChannelsResponse), nil
}

// GetNotificationChannel records the request and returns the canned channel.
func (s *mockNotificationChannelServer) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.NotificationChannel), nil
}

// CreateNotificationChannel records the request and returns the canned channel.
func (s *mockNotificationChannelServer) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.NotificationChannel), nil
}

// UpdateNotificationChannel records the request and returns the canned channel.
func (s *mockNotificationChannelServer) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest) (*monitoringpb.NotificationChannel, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*monitoringpb.NotificationChannel), nil
}

// DeleteNotificationChannel records the request and returns the canned Empty.
func (s *mockNotificationChannelServer) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest) (*emptypb.Empty, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*emptypb.Empty), nil
}
type mockUptimeCheckServer struct {
// Embed for forward compatibility.
// Tests will keep working if more methods are added
@ -338,17 +512,21 @@ func (s *mockUptimeCheckServer) ListUptimeCheckIps(ctx context.Context, req *mon
var clientOpt option.ClientOption
var (
mockGroup mockGroupServer
mockMetric mockMetricServer
mockUptimeCheck mockUptimeCheckServer
mockAlertPolicy mockAlertPolicyServer
mockGroup mockGroupServer
mockMetric mockMetricServer
mockNotificationChannel mockNotificationChannelServer
mockUptimeCheck mockUptimeCheckServer
)
func TestMain(m *testing.M) {
flag.Parse()
serv := grpc.NewServer()
monitoringpb.RegisterAlertPolicyServiceServer(serv, &mockAlertPolicy)
monitoringpb.RegisterGroupServiceServer(serv, &mockGroup)
monitoringpb.RegisterMetricServiceServer(serv, &mockMetric)
monitoringpb.RegisterNotificationChannelServiceServer(serv, &mockNotificationChannel)
monitoringpb.RegisterUptimeCheckServiceServer(serv, &mockUptimeCheck)
lis, err := net.Listen("tcp", "localhost:0")
@ -366,6 +544,317 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
// TestAlertPolicyServiceListAlertPolicies seeds the shared mock with a
// one-element page, issues the RPC through a real client over the in-process
// gRPC server, and checks both the recorded request and the first iterator
// element.
func TestAlertPolicyServiceListAlertPolicies(t *testing.T) {
	var nextPageToken string = ""
	var alertPoliciesElement *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{}
	var alertPolicies = []*monitoringpb.AlertPolicy{alertPoliciesElement}
	var expectedResponse = &monitoringpb.ListAlertPoliciesResponse{
		NextPageToken: nextPageToken,
		AlertPolicies: alertPolicies,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockAlertPolicy.err = nil
	mockAlertPolicy.reqs = nil
	mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListAlertPoliciesRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListAlertPolicies(context.Background(), request).Next()
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	// Compare as proto.Message when possible; fall back to == otherwise.
	want := (interface{})(expectedResponse.AlertPolicies[0])
	got := (interface{})(resp)
	var ok bool
	switch want := (want).(type) {
	case proto.Message:
		ok = proto.Equal(want, got.(proto.Message))
	default:
		ok = want == got
	}
	if !ok {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAlertPolicyServiceListAlertPoliciesError verifies that a gRPC error
// injected into the mock is surfaced to the caller with its status code.
func TestAlertPolicyServiceListAlertPoliciesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAlertPolicy.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListAlertPoliciesRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListAlertPolicies(context.Background(), request).Next()
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestAlertPolicyServiceGetAlertPolicy checks the happy path: the canned
// response replayed by the mock must round-trip unchanged, and the request
// the server saw must equal the one sent.
func TestAlertPolicyServiceGetAlertPolicy(t *testing.T) {
	var name2 string = "name2-1052831874"
	var displayName string = "displayName1615086568"
	var expectedResponse = &monitoringpb.AlertPolicy{
		Name:        name2,
		DisplayName: displayName,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockAlertPolicy.err = nil
	mockAlertPolicy.reqs = nil
	mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]")
	var request = &monitoringpb.GetAlertPolicyRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetAlertPolicy(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAlertPolicyServiceGetAlertPolicyError verifies that a gRPC error
// injected into the mock is surfaced to the caller with its status code.
func TestAlertPolicyServiceGetAlertPolicyError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAlertPolicy.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]")
	var request = &monitoringpb.GetAlertPolicyRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetAlertPolicy(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestAlertPolicyServiceCreateAlertPolicy checks the happy path: the canned
// response replayed by the mock must round-trip unchanged, and the request
// the server saw must equal the one sent.
func TestAlertPolicyServiceCreateAlertPolicy(t *testing.T) {
	var name2 string = "name2-1052831874"
	var displayName string = "displayName1615086568"
	var expectedResponse = &monitoringpb.AlertPolicy{
		Name:        name2,
		DisplayName: displayName,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockAlertPolicy.err = nil
	mockAlertPolicy.reqs = nil
	mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{}
	var request = &monitoringpb.CreateAlertPolicyRequest{
		Name:        formattedName,
		AlertPolicy: alertPolicy,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateAlertPolicy(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAlertPolicyServiceCreateAlertPolicyError verifies that a gRPC error
// injected into the mock is surfaced to the caller with its status code.
func TestAlertPolicyServiceCreateAlertPolicyError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAlertPolicy.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{}
	var request = &monitoringpb.CreateAlertPolicyRequest{
		Name:        formattedName,
		AlertPolicy: alertPolicy,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateAlertPolicy(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestAlertPolicyServiceDeleteAlertPolicy checks the happy path of the
// delete RPC: no error is returned and the request the mock recorded
// equals the one sent. Delete returns only an error, so there is no
// response to compare.
func TestAlertPolicyServiceDeleteAlertPolicy(t *testing.T) {
	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockAlertPolicy.err = nil
	mockAlertPolicy.reqs = nil
	mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]")
	var request = &monitoringpb.DeleteAlertPolicyRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.DeleteAlertPolicy(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
}

// TestAlertPolicyServiceDeleteAlertPolicyError verifies that a gRPC error
// injected into the mock is surfaced to the caller with its status code.
func TestAlertPolicyServiceDeleteAlertPolicyError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAlertPolicy.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s/alertPolicies/%s", "[PROJECT]", "[ALERT_POLICY]")
	var request = &monitoringpb.DeleteAlertPolicyRequest{
		Name: formattedName,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.DeleteAlertPolicy(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
// TestAlertPolicyServiceUpdateAlertPolicy checks the happy path: the canned
// response replayed by the mock must round-trip unchanged, and the request
// the server saw must equal the one sent.
func TestAlertPolicyServiceUpdateAlertPolicy(t *testing.T) {
	var name string = "name3373707"
	var displayName string = "displayName1615086568"
	var expectedResponse = &monitoringpb.AlertPolicy{
		Name:        name,
		DisplayName: displayName,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockAlertPolicy.err = nil
	mockAlertPolicy.reqs = nil
	mockAlertPolicy.resps = append(mockAlertPolicy.resps[:0], expectedResponse)
	var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{}
	var request = &monitoringpb.UpdateAlertPolicyRequest{
		AlertPolicy: alertPolicy,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.UpdateAlertPolicy(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockAlertPolicy.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestAlertPolicyServiceUpdateAlertPolicyError verifies that a gRPC error
// injected into the mock is surfaced to the caller with its status code.
func TestAlertPolicyServiceUpdateAlertPolicyError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockAlertPolicy.err = gstatus.Error(errCode, "test error")
	var alertPolicy *monitoringpb.AlertPolicy = &monitoringpb.AlertPolicy{}
	var request = &monitoringpb.UpdateAlertPolicyRequest{
		AlertPolicy: alertPolicy,
	}
	c, err := NewAlertPolicyClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.UpdateAlertPolicy(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestGroupServiceListGroups(t *testing.T) {
var nextPageToken string = ""
var groupElement *monitoringpb.Group = &monitoringpb.Group{}
@ -1308,6 +1797,466 @@ func TestMetricServiceCreateTimeSeriesError(t *testing.T) {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
// TestNotificationChannelServiceListNotificationChannelDescriptors seeds the
// shared mock with a one-element page and checks the recorded request plus
// the first iterator element.
func TestNotificationChannelServiceListNotificationChannelDescriptors(t *testing.T) {
	var nextPageToken string = ""
	var channelDescriptorsElement *monitoringpb.NotificationChannelDescriptor = &monitoringpb.NotificationChannelDescriptor{}
	var channelDescriptors = []*monitoringpb.NotificationChannelDescriptor{channelDescriptorsElement}
	var expectedResponse = &monitoringpb.ListNotificationChannelDescriptorsResponse{
		NextPageToken:      nextPageToken,
		ChannelDescriptors: channelDescriptors,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListNotificationChannelDescriptorsRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next()
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	// Compare as proto.Message when possible; fall back to == otherwise.
	want := (interface{})(expectedResponse.ChannelDescriptors[0])
	got := (interface{})(resp)
	var ok bool
	switch want := (want).(type) {
	case proto.Message:
		ok = proto.Equal(want, got.(proto.Message))
	default:
		ok = want == got
	}
	if !ok {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceListNotificationChannelDescriptorsError
// verifies that a gRPC error injected into the mock is surfaced with its
// status code.
func TestNotificationChannelServiceListNotificationChannelDescriptorsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListNotificationChannelDescriptorsRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListNotificationChannelDescriptors(context.Background(), request).Next()
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceGetNotificationChannelDescriptor checks the
// happy path: the canned descriptor must round-trip unchanged, and the
// request the mock recorded must equal the one sent.
func TestNotificationChannelServiceGetNotificationChannelDescriptor(t *testing.T) {
	var name2 string = "name2-1052831874"
	var type_ string = "type3575610"
	var displayName string = "displayName1615086568"
	var description string = "description-1724546052"
	var expectedResponse = &monitoringpb.NotificationChannelDescriptor{
		Name:        name2,
		Type:        type_,
		DisplayName: displayName,
		Description: description,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]")
	var request = &monitoringpb.GetNotificationChannelDescriptorRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetNotificationChannelDescriptor(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceGetNotificationChannelDescriptorError
// verifies that a gRPC error injected into the mock is surfaced with its
// status code.
func TestNotificationChannelServiceGetNotificationChannelDescriptorError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannelDescriptors/%s", "[PROJECT]", "[CHANNEL_DESCRIPTOR]")
	var request = &monitoringpb.GetNotificationChannelDescriptorRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetNotificationChannelDescriptor(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceListNotificationChannels seeds the shared
// mock with a one-element page and checks the recorded request plus the
// first iterator element.
func TestNotificationChannelServiceListNotificationChannels(t *testing.T) {
	var nextPageToken string = ""
	var notificationChannelsElement *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{}
	var notificationChannels = []*monitoringpb.NotificationChannel{notificationChannelsElement}
	var expectedResponse = &monitoringpb.ListNotificationChannelsResponse{
		NextPageToken:        nextPageToken,
		NotificationChannels: notificationChannels,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListNotificationChannelsRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListNotificationChannels(context.Background(), request).Next()
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	// Compare as proto.Message when possible; fall back to == otherwise.
	want := (interface{})(expectedResponse.NotificationChannels[0])
	got := (interface{})(resp)
	var ok bool
	switch want := (want).(type) {
	case proto.Message:
		ok = proto.Equal(want, got.(proto.Message))
	default:
		ok = want == got
	}
	if !ok {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceListNotificationChannelsError verifies that
// a gRPC error injected into the mock is surfaced with its status code.
func TestNotificationChannelServiceListNotificationChannelsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var request = &monitoringpb.ListNotificationChannelsRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.ListNotificationChannels(context.Background(), request).Next()
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceGetNotificationChannel checks the happy
// path: the canned channel must round-trip unchanged, and the request the
// mock recorded must equal the one sent.
func TestNotificationChannelServiceGetNotificationChannel(t *testing.T) {
	var type_ string = "type3575610"
	var name2 string = "name2-1052831874"
	var displayName string = "displayName1615086568"
	var description string = "description-1724546052"
	var expectedResponse = &monitoringpb.NotificationChannel{
		Type:        type_,
		Name:        name2,
		DisplayName: displayName,
		Description: description,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]")
	var request = &monitoringpb.GetNotificationChannelRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetNotificationChannel(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceGetNotificationChannelError verifies that a
// gRPC error injected into the mock is surfaced with its status code.
func TestNotificationChannelServiceGetNotificationChannelError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]")
	var request = &monitoringpb.GetNotificationChannelRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.GetNotificationChannel(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceCreateNotificationChannel checks the happy
// path: the canned channel must round-trip unchanged, and the request the
// mock recorded must equal the one sent.
func TestNotificationChannelServiceCreateNotificationChannel(t *testing.T) {
	var type_ string = "type3575610"
	var name2 string = "name2-1052831874"
	var displayName string = "displayName1615086568"
	var description string = "description-1724546052"
	var expectedResponse = &monitoringpb.NotificationChannel{
		Type:        type_,
		Name:        name2,
		DisplayName: displayName,
		Description: description,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{}
	var request = &monitoringpb.CreateNotificationChannelRequest{
		Name:                formattedName,
		NotificationChannel: notificationChannel,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateNotificationChannel(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceCreateNotificationChannelError verifies
// that a gRPC error injected into the mock is surfaced with its status
// code.
func TestNotificationChannelServiceCreateNotificationChannelError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s", "[PROJECT]")
	var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{}
	var request = &monitoringpb.CreateNotificationChannelRequest{
		Name:                formattedName,
		NotificationChannel: notificationChannel,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.CreateNotificationChannel(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceUpdateNotificationChannel checks the happy
// path: the canned channel must round-trip unchanged, and the request the
// mock recorded must equal the one sent.
func TestNotificationChannelServiceUpdateNotificationChannel(t *testing.T) {
	var type_ string = "type3575610"
	var name string = "name3373707"
	var displayName string = "displayName1615086568"
	var description string = "description-1724546052"
	var expectedResponse = &monitoringpb.NotificationChannel{
		Type:        type_,
		Name:        name,
		DisplayName: displayName,
		Description: description,
	}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{}
	var request = &monitoringpb.UpdateNotificationChannelRequest{
		NotificationChannel: notificationChannel,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.UpdateNotificationChannel(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}

// TestNotificationChannelServiceUpdateNotificationChannelError verifies
// that a gRPC error injected into the mock is surfaced with its status
// code.
func TestNotificationChannelServiceUpdateNotificationChannelError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var notificationChannel *monitoringpb.NotificationChannel = &monitoringpb.NotificationChannel{}
	var request = &monitoringpb.UpdateNotificationChannelRequest{
		NotificationChannel: notificationChannel,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := c.UpdateNotificationChannel(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestNotificationChannelServiceDeleteNotificationChannel checks the happy
// path of the delete RPC: no error is returned and the request the mock
// recorded equals the one sent. Delete returns only an error, so there is
// no response to compare.
func TestNotificationChannelServiceDeleteNotificationChannel(t *testing.T) {
	var expectedResponse *emptypb.Empty = &emptypb.Empty{}
	// Reset shared mock state so earlier tests cannot leak into this one.
	mockNotificationChannel.err = nil
	mockNotificationChannel.reqs = nil
	mockNotificationChannel.resps = append(mockNotificationChannel.resps[:0], expectedResponse)
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]")
	var request = &monitoringpb.DeleteNotificationChannelRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.DeleteNotificationChannel(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	if want, got := request, mockNotificationChannel.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}
}

// TestNotificationChannelServiceDeleteNotificationChannelError verifies
// that a gRPC error injected into the mock is surfaced with its status
// code.
func TestNotificationChannelServiceDeleteNotificationChannelError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockNotificationChannel.err = gstatus.Error(errCode, "test error")
	var formattedName string = fmt.Sprintf("projects/%s/notificationChannels/%s", "[PROJECT]", "[NOTIFICATION_CHANNEL]")
	var request = &monitoringpb.DeleteNotificationChannelRequest{
		Name: formattedName,
	}
	c, err := NewNotificationChannelClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}
	err = c.DeleteNotificationChannel(context.Background(), request)
	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
}
func TestUptimeCheckServiceListUptimeCheckConfigs(t *testing.T) {
var nextPageToken string = ""
var uptimeCheckConfigsElement *monitoringpb.UptimeCheckConfig = &monitoringpb.UptimeCheckConfig{}

@ -0,0 +1,369 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package monitoring
import (
"math"
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
// Each field holds the gax call options applied to the identically named RPC;
// a nil slice means the call is made with no client-side retry.
type NotificationChannelCallOptions struct {
	ListNotificationChannelDescriptors []gax.CallOption
	GetNotificationChannelDescriptor   []gax.CallOption
	ListNotificationChannels           []gax.CallOption
	GetNotificationChannel             []gax.CallOption
	CreateNotificationChannel          []gax.CallOption
	UpdateNotificationChannel          []gax.CallOption
	DeleteNotificationChannel          []gax.CallOption
}
// defaultNotificationChannelClientOptions returns the base dial options:
// the production Stackdriver Monitoring endpoint and the package's default
// OAuth scopes. Caller-supplied options are appended after these and can
// override them.
func defaultNotificationChannelClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("monitoring.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultNotificationChannelCallOptions builds the per-RPC retry policy:
// idempotent methods retry DeadlineExceeded/Unavailable with exponential
// backoff (100ms initial, 60s cap, 1.3x multiplier).
func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	// Note: the {"default", "non_idempotent"} key is not present in the map,
	// so Create/Update look up a nil slice and are issued without retry.
	return &NotificationChannelCallOptions{
		ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}],
		GetNotificationChannelDescriptor:   retry[[2]string{"default", "idempotent"}],
		ListNotificationChannels:           retry[[2]string{"default", "idempotent"}],
		GetNotificationChannel:             retry[[2]string{"default", "idempotent"}],
		CreateNotificationChannel:          retry[[2]string{"default", "non_idempotent"}],
		UpdateNotificationChannel:          retry[[2]string{"default", "non_idempotent"}],
		DeleteNotificationChannel:          retry[[2]string{"default", "idempotent"}],
	}
}
// NotificationChannelClient is a client for interacting with Stackdriver Monitoring API.
// It wraps a single gRPC connection; Close must be called when the client
// is no longer needed.
type NotificationChannelClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	notificationChannelClient monitoringpb.NotificationChannelServiceClient

	// The call options for this service. Exported so callers may adjust
	// retry behavior per method after construction.
	CallOptions *NotificationChannelCallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewNotificationChannelClient creates a new notification channel service client.
//
// The Notification Channel API provides access to configuration that
// controls how messages related to incidents are sent.
//
// The supplied opts are applied after the package defaults (endpoint and
// scopes) and may override them. The returned client owns the gRPC
// connection; call Close to release it.
func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultNotificationChannelClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &NotificationChannelClient{
		conn:        conn,
		CallOptions: defaultNotificationChannelCallOptions(),

		notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(conn),
	}
	// Populate the x-goog-api-client header sent with every request.
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// The connection is shared by all RPCs made through this client.
func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required. The client is unusable afterwards.
func (c *NotificationChannelClient) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
//
// Caller-supplied key/value pairs are appended after the Go runtime
// version and before the gapic/gax/grpc version triple.
func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
// makes it possible for new channel types to be dynamically added.
//
// The returned iterator fetches pages lazily; errors from the underlying
// RPC surface from the iterator's Next method.
func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression keeps the append from mutating the shared defaults.
	opts = append(c.CallOptions.ListNotificationChannelDescriptors[0:len(c.CallOptions.ListNotificationChannelDescriptors):len(c.CallOptions.ListNotificationChannelDescriptors)], opts...)
	it := &NotificationChannelDescriptorIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
		var resp *monitoringpb.ListNotificationChannelDescriptorsResponse
		req.PageToken = pageToken
		// Clamp to int32 because the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.notificationChannelClient.ListNotificationChannelDescriptors(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.ChannelDescriptors, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
// are expected / permitted for a notification channel of the given type.
//
// Retries follow the client's GetNotificationChannelDescriptor call options;
// per-call opts are applied after them.
func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression keeps the append from mutating the shared defaults.
	opts = append(c.CallOptions.GetNotificationChannelDescriptor[0:len(c.CallOptions.GetNotificationChannelDescriptor):len(c.CallOptions.GetNotificationChannelDescriptor)], opts...)
	var resp *monitoringpb.NotificationChannelDescriptor
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.notificationChannelClient.GetNotificationChannelDescriptor(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListNotificationChannels lists the notification channels that have been created for the project.
//
// The returned iterator fetches pages lazily; errors from the underlying
// RPC surface from the iterator's Next method.
func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression keeps the append from mutating the shared defaults.
	opts = append(c.CallOptions.ListNotificationChannels[0:len(c.CallOptions.ListNotificationChannels):len(c.CallOptions.ListNotificationChannels)], opts...)
	it := &NotificationChannelIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
		var resp *monitoringpb.ListNotificationChannelsResponse
		req.PageToken = pageToken
		// Clamp to int32 because the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.notificationChannelClient.ListNotificationChannels(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.NotificationChannels, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetNotificationChannel gets a single notification channel. The channel includes the relevant
// configuration details with which the channel was created. However, the
// response may truncate or omit passwords, API keys, or other private key
// matter and thus the response may not be 100% identical to the information
// that was supplied in the call to the create method.
func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Full-slice expression keeps the append from mutating the shared defaults.
	opts = append(c.CallOptions.GetNotificationChannel[0:len(c.CallOptions.GetNotificationChannel):len(c.CallOptions.GetNotificationChannel)], opts...)
	var resp *monitoringpb.NotificationChannel
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.notificationChannelClient.GetNotificationChannel(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateNotificationChannel creates a new notification channel, representing a single notification
// endpoint such as an email address, SMS number, or pagerduty service.
func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Layer caller options on top of a capacity-capped copy of the defaults.
	defaults := c.CallOptions.CreateNotificationChannel
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	var created *monitoringpb.NotificationChannel
	invokeErr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		created, err = c.notificationChannelClient.CreateNotificationChannel(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if invokeErr != nil {
		return nil, invokeErr
	}
	return created, nil
}
// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
// remain unchanged.
func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Layer caller options on top of a capacity-capped copy of the defaults.
	defaults := c.CallOptions.UpdateNotificationChannel
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	var updated *monitoringpb.NotificationChannel
	invokeErr := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		updated, err = c.notificationChannelClient.UpdateNotificationChannel(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if invokeErr != nil {
		return nil, invokeErr
	}
	return updated, nil
}
// DeleteNotificationChannel deletes a notification channel.
func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Layer caller options on top of a capacity-capped copy of the defaults.
	defaults := c.CallOptions.DeleteNotificationChannel
	opts = append(defaults[0:len(defaults):len(defaults)], opts...)
	// The RPC returns an Empty response we do not need; surface only the error.
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		_, err := c.notificationChannelClient.DeleteNotificationChannel(ctx, req, settings.GRPC...)
		return err
	}, opts...)
}
// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
type NotificationChannelDescriptorIterator struct {
	// items buffers descriptors already fetched from the server but not yet
	// handed out by Next.
	items []*monitoringpb.NotificationChannelDescriptor
	// pageInfo and nextFunc are produced by iterator.NewPageInfo and drive
	// the shared pagination machinery.
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
	info := it.pageInfo
	return info
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
	// nextFunc refills it.items as needed; on failure return the zero value.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}
// bufLen reports how many fetched descriptors are currently buffered.
func (it *NotificationChannelDescriptorIterator) bufLen() int {
	n := len(it.items)
	return n
}
// takeBuf hands the buffered descriptors to the pagination machinery and
// leaves the iterator's buffer empty.
func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}
// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
type NotificationChannelIterator struct {
	// items buffers channels already fetched from the server but not yet
	// handed out by Next.
	items []*monitoringpb.NotificationChannel
	// pageInfo and nextFunc are produced by iterator.NewPageInfo and drive
	// the shared pagination machinery.
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
	info := it.pageInfo
	return info
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
	// nextFunc refills it.items as needed; on failure return the zero value.
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	head := it.items[0]
	it.items = it.items[1:]
	return head, nil
}
// bufLen reports how many fetched channels are currently buffered.
func (it *NotificationChannelIterator) bufLen() int {
	n := len(it.items)
	return n
}
// takeBuf hands the buffered channels to the pagination machinery and
// leaves the iterator's buffer empty.
func (it *NotificationChannelIterator) takeBuf() interface{} {
	buffered := it.items
	it.items = nil
	return buffered
}

@ -0,0 +1,170 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package monitoring_test
import (
"cloud.google.com/go/monitoring/apiv3"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)
// ExampleNewNotificationChannelClient shows how to construct the client.
func ExampleNewNotificationChannelClient() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}
// ExampleNotificationChannelClient_ListNotificationChannelDescriptors shows iterating a paged list.
func ExampleNotificationChannelClient_ListNotificationChannelDescriptors() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.ListNotificationChannelDescriptorsRequest{
		// TODO: Fill request struct fields.
	}
	iter := client.ListNotificationChannelDescriptors(ctx, req)
	for {
		resp, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		_ = resp // TODO: Use resp.
	}
}
// ExampleNotificationChannelClient_GetNotificationChannelDescriptor shows a unary call.
func ExampleNotificationChannelClient_GetNotificationChannelDescriptor() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.GetNotificationChannelDescriptorRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := client.GetNotificationChannelDescriptor(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp // TODO: Use resp.
}
// ExampleNotificationChannelClient_ListNotificationChannels shows iterating a paged list.
func ExampleNotificationChannelClient_ListNotificationChannels() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.ListNotificationChannelsRequest{
		// TODO: Fill request struct fields.
	}
	iter := client.ListNotificationChannels(ctx, req)
	for {
		resp, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		_ = resp // TODO: Use resp.
	}
}
// ExampleNotificationChannelClient_GetNotificationChannel shows a unary call.
func ExampleNotificationChannelClient_GetNotificationChannel() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.GetNotificationChannelRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := client.GetNotificationChannel(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp // TODO: Use resp.
}
// ExampleNotificationChannelClient_CreateNotificationChannel shows a unary call.
func ExampleNotificationChannelClient_CreateNotificationChannel() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.CreateNotificationChannelRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := client.CreateNotificationChannel(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp // TODO: Use resp.
}
// ExampleNotificationChannelClient_UpdateNotificationChannel shows a unary call.
func ExampleNotificationChannelClient_UpdateNotificationChannel() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.UpdateNotificationChannelRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := client.UpdateNotificationChannel(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp // TODO: Use resp.
}
// ExampleNotificationChannelClient_DeleteNotificationChannel shows a unary call with no response body.
func ExampleNotificationChannelClient_DeleteNotificationChannel() {
	ctx := context.Background()
	client, err := monitoring.NewNotificationChannelClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &monitoringpb.DeleteNotificationChannelRequest{
		// TODO: Fill request struct fields.
	}
	if err := client.DeleteNotificationChannel(ctx, req); err != nil {
		// TODO: Handle error.
	}
}

Some files were not shown because too many files have changed in this diff Show More