Compare commits
322 Commits
v0.4.0-rc2
...
v0.5.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
04cfa58445 | ||
|
|
8153d432ce | ||
|
|
c6d6eb863a | ||
|
|
786437901c | ||
|
|
9a4bc9a20d | ||
|
|
36ca9c6063 | ||
|
|
bccbe19a86 | ||
|
|
f3122dd853 | ||
|
|
f5b6c87e7c | ||
|
|
060f0a0db2 | ||
|
|
721f39d4af | ||
|
|
be5fce87b6 | ||
|
|
70e137991d | ||
|
|
0176f05db8 | ||
|
|
952539dd8a | ||
|
|
670d03a1b4 | ||
|
|
92d67ff13f | ||
|
|
fe94b4c44f | ||
|
|
86c52ec3e7 | ||
|
|
d855c35059 | ||
|
|
b213da0961 | ||
|
|
8e7a85b8b9 | ||
|
|
e53b52de03 | ||
|
|
c660130f17 | ||
|
|
232c611464 | ||
|
|
cf83277557 | ||
|
|
970c630fd3 | ||
|
|
77c2bbcdd2 | ||
|
|
c04eb43021 | ||
|
|
a512b907cf | ||
|
|
2339e548f6 | ||
|
|
52de03f64b | ||
|
|
8e58932db5 | ||
|
|
4cfcc3ba32 | ||
|
|
fb8d964966 | ||
|
|
9f01770d7e | ||
|
|
9a26a9fc04 | ||
|
|
104ebf58cf | ||
|
|
d316eb7e96 | ||
|
|
3dd578318a | ||
|
|
9fa2f73139 | ||
|
|
ae973a96c0 | ||
|
|
6c5dc41e6d | ||
|
|
57dc1156de | ||
|
|
a7559fd3a8 | ||
|
|
99b564e804 | ||
|
|
7ecb221444 | ||
|
|
2d1e6d0163 | ||
|
|
ab1b0ca2e9 | ||
|
|
f155bc9df9 | ||
|
|
956b0bb65c | ||
|
|
ee4a357441 | ||
|
|
e421a07336 | ||
|
|
fd5f3ad0cc | ||
|
|
b038ff4020 | ||
|
|
8b9291de64 | ||
|
|
4bd43d604f | ||
|
|
19625def22 | ||
|
|
4e7f8ed7a6 | ||
|
|
c99bb22eeb | ||
|
|
8cfc28e37a | ||
|
|
768a044dad | ||
|
|
658a6bc1b3 | ||
|
|
1df80ba111 | ||
|
|
76c7accda0 | ||
|
|
0ac5c901d2 | ||
|
|
834c687226 | ||
|
|
9911019102 | ||
|
|
e80e448cd5 | ||
|
|
7180ddca87 | ||
|
|
07bcb9a7aa | ||
|
|
f6bbbdce7f | ||
|
|
c60b2cb2f9 | ||
|
|
11fd9648ac | ||
|
|
1bfb34afd3 | ||
|
|
1a6a9de6c7 | ||
|
|
335811df0c | ||
|
|
5021ffd2e7 | ||
|
|
1d3268d128 | ||
|
|
e2b6a832ac | ||
|
|
035c0de9e5 | ||
|
|
27cdffe042 | ||
|
|
f80dc360c7 | ||
|
|
a1e610bdd3 | ||
|
|
cb473ad0a7 | ||
|
|
8d78aed26c | ||
|
|
19cb0d9b07 | ||
|
|
4157dff31c | ||
|
|
99aacc7b79 | ||
|
|
b6462eb6d0 | ||
|
|
c2589c8099 | ||
|
|
bf3cd8b8f4 | ||
|
|
de5d052985 | ||
|
|
f9e40c2ce2 | ||
|
|
70dfc1e4db | ||
|
|
20fa654a6b | ||
|
|
a21e8f1892 | ||
|
|
6bbec2df68 | ||
|
|
97fcc446d7 | ||
|
|
0cf8b761d7 | ||
|
|
6a7c8c2a25 | ||
|
|
56acdcf5ae | ||
|
|
62a28e1217 | ||
|
|
38b7fe5013 | ||
|
|
987718f24d | ||
|
|
c442690618 | ||
|
|
2b56c016b5 | ||
|
|
2c6a671015 | ||
|
|
e4f0e66cb7 | ||
|
|
d36fa5f9ff | ||
|
|
e2856e36f4 | ||
|
|
1b5bbaa494 | ||
|
|
6b7628a175 | ||
|
|
4c5615c5e4 | ||
|
|
417cd8a9d3 | ||
|
|
94361315a8 | ||
|
|
72fbf70899 | ||
|
|
e959f7f1df | ||
|
|
bd532747f1 | ||
|
|
3ab5520ea3 | ||
|
|
9c8b4e0da3 | ||
|
|
4a33fabe8b | ||
|
|
6cac8cc95f | ||
|
|
f8491c8140 | ||
|
|
950aa5cad2 | ||
|
|
56f457c2ef | ||
|
|
5bba2b3a64 | ||
|
|
c93b79abef | ||
|
|
17f0a32b79 | ||
|
|
7400a77b32 | ||
|
|
95f626ce21 | ||
|
|
486e359e63 | ||
|
|
447d262c06 | ||
|
|
b818d3f7e8 | ||
|
|
aebd037c46 | ||
|
|
ab79c4d9c1 | ||
|
|
2a6e3dbf40 | ||
|
|
43543d1579 | ||
|
|
581198de1b | ||
|
|
8aa1572e0d | ||
|
|
c8edb33ecd | ||
|
|
a63f26f438 | ||
|
|
600ffb7d20 | ||
|
|
3e2e3d0a59 | ||
|
|
53ace516a4 | ||
|
|
4c12502605 | ||
|
|
264ca68902 | ||
|
|
1bb6cae798 | ||
|
|
81923c0720 | ||
|
|
7f18deb79f | ||
|
|
0ce345a8b5 | ||
|
|
998ada5303 | ||
|
|
1e7e241d9e | ||
|
|
d181f532c4 | ||
|
|
28656c0e42 | ||
|
|
cd80b70d26 | ||
|
|
7fc4aedd1c | ||
|
|
da26cab450 | ||
|
|
ebd45f5d9b | ||
|
|
9473210fb4 | ||
|
|
7a3f2ec446 | ||
|
|
a82d2cd74d | ||
|
|
e256702c8c | ||
|
|
db9363ef46 | ||
|
|
6f3648735c | ||
|
|
6a5219b879 | ||
|
|
caab273780 | ||
|
|
7c8fc35c69 | ||
|
|
f4f3814d11 | ||
|
|
6829837fd5 | ||
|
|
fcb14b98d2 | ||
|
|
a7d08d9bcd | ||
|
|
074d7e3add | ||
|
|
53658e8932 | ||
|
|
f659e817b4 | ||
|
|
3ed0377956 | ||
|
|
2399bfe578 | ||
|
|
571712122d | ||
|
|
5af32bcdd3 | ||
|
|
f0ebeab041 | ||
|
|
b1ed661da9 | ||
|
|
56160e0058 | ||
|
|
870d5c9340 | ||
|
|
94c551b964 | ||
|
|
cc00ece828 | ||
|
|
306807b286 | ||
|
|
97ca43708a | ||
|
|
a5b0ebe0ec | ||
|
|
b01442c6bb | ||
|
|
ca4e17027d | ||
|
|
bd03e83239 | ||
|
|
c8f9b98c1d | ||
|
|
dd47c9c74e | ||
|
|
fe5a72299d | ||
|
|
2bc53a6ac3 | ||
|
|
2992237b03 | ||
|
|
1588f2217a | ||
|
|
b970dddc8b | ||
|
|
968660f6da | ||
|
|
eff8c21a87 | ||
|
|
c4cd2385bf | ||
|
|
d063fe598d | ||
|
|
9eef99285e | ||
|
|
0a2b9c2784 | ||
|
|
b6cb3e2b2d | ||
|
|
bee5cb4768 | ||
|
|
9453df4859 | ||
|
|
1ae222b926 | ||
|
|
e599819974 | ||
|
|
670d647216 | ||
|
|
8f49a5bc31 | ||
|
|
d22a4118d0 | ||
|
|
bae2d33e30 | ||
|
|
b5927f10c4 | ||
|
|
de249c2269 | ||
|
|
144b94c251 | ||
|
|
f2bb2e0e4e | ||
|
|
3b85723982 | ||
|
|
4ced2d8287 | ||
|
|
f0279172d5 | ||
|
|
fe5b5cf86d | ||
|
|
c1e46c7d7c | ||
|
|
0b5f4c8454 | ||
|
|
2a15d98575 | ||
|
|
0540e5e295 | ||
|
|
b2d8bcdd75 | ||
|
|
34731c7894 | ||
|
|
09d3dad46b | ||
|
|
b1701ccb8f | ||
|
|
36b9f25eb7 | ||
|
|
5fbfbe1ff7 | ||
|
|
0aca42985b | ||
|
|
1dfca96c69 | ||
|
|
b8150450a6 | ||
|
|
7e775fcfae | ||
|
|
102007b231 | ||
|
|
993b5f557f | ||
|
|
b91c117238 | ||
|
|
f531eed130 | ||
|
|
7e1e8b6717 | ||
|
|
f8535f1397 | ||
|
|
5e698f3d95 | ||
|
|
207a647a40 | ||
|
|
48d257aaf1 | ||
|
|
66078be1e5 | ||
|
|
bb45f83319 | ||
|
|
a0b9d179b1 | ||
|
|
dcdd1a4757 | ||
|
|
009ed57719 | ||
|
|
1a8fb2a071 | ||
|
|
b52fa33056 | ||
|
|
7c4d0a5da7 | ||
|
|
5ba35ac2de | ||
|
|
1d5fde2c1a | ||
|
|
1b0042e69b | ||
|
|
9d60bb7c73 | ||
|
|
4c96efa5fc | ||
|
|
0fb4991068 | ||
|
|
bd168c4050 | ||
|
|
5b1b4db2ad | ||
|
|
ebbc45abea | ||
|
|
c3ed882b76 | ||
|
|
3c712ae30a | ||
|
|
d4ce483ebc | ||
|
|
08208bd141 | ||
|
|
f8b0ca86c2 | ||
|
|
e75902ffbf | ||
|
|
b8f7c3446b | ||
|
|
ff1d6587ea | ||
|
|
3a49c165bd | ||
|
|
816d60203a | ||
|
|
c02b1c413e | ||
|
|
52dc5734a7 | ||
|
|
3f090d3f47 | ||
|
|
93366f22be | ||
|
|
ffe1a7940b | ||
|
|
5f362aca6b | ||
|
|
e416ac17d8 | ||
|
|
3c1caa5acb | ||
|
|
b876d5f81f | ||
|
|
6496c44261 | ||
|
|
9661bf89e1 | ||
|
|
627658242d | ||
|
|
28831d1f65 | ||
|
|
b101c29011 | ||
|
|
ca2ab8c137 | ||
|
|
219e5686bf | ||
|
|
52a9c70277 | ||
|
|
f3f8b268e0 | ||
|
|
02ef8557a2 | ||
|
|
49764bb47a | ||
|
|
c2c942411b | ||
|
|
07e7604f0e | ||
|
|
5e486e6bf2 | ||
|
|
8f82b76274 | ||
|
|
ecb727822c | ||
|
|
66b4e3b826 | ||
|
|
eaf593b3d3 | ||
|
|
8328e96d93 | ||
|
|
cf10bf45b5 | ||
|
|
04b435c832 | ||
|
|
8299069a46 | ||
|
|
c1acb74d7a | ||
|
|
226dc8d15a | ||
|
|
c5f0cfa041 | ||
|
|
83743d3134 | ||
|
|
0c96ea0de2 | ||
|
|
459915c57c | ||
|
|
003770f2d4 | ||
|
|
1c6960f5ca | ||
|
|
938290f773 | ||
|
|
d79f1eb9f3 | ||
|
|
73c548c1f9 | ||
|
|
1749d99b31 | ||
|
|
99544a4cb0 | ||
|
|
82a7bf4e0d | ||
|
|
5de71f7e38 | ||
|
|
b333ea5294 | ||
|
|
64e21418dd | ||
|
|
6726ce915e | ||
|
|
66114abb0d | ||
|
|
e7e68149e5 |
@@ -1,2 +1,4 @@
|
||||
docker-machine*
|
||||
.git
|
||||
*.log
|
||||
bin
|
||||
cover
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,2 +1,6 @@
|
||||
docker-machine*
|
||||
*.log
|
||||
*.iml
|
||||
.idea/
|
||||
./bin
|
||||
cover
|
||||
|
||||
19
.travis.yml
19
.travis.yml
@@ -1,14 +1,5 @@
|
||||
language: go
|
||||
sudo: false
|
||||
go:
|
||||
- 1.3
|
||||
install:
|
||||
- export GOPATH=${TRAVIS_BUILD_DIR}/Godeps/_workspace:$GOPATH
|
||||
- export PATH=${TRAVIS_BUILD_DIR}/Godeps/_workspace/bin:$PATH
|
||||
- go get -t -v ./...
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
script:
|
||||
- script/validate-dco
|
||||
- script/validate-gofmt
|
||||
- go test -v -short ./...
|
||||
- script/generate-coverage
|
||||
sudo: required
|
||||
dist: trusty
|
||||
language: bash
|
||||
services: docker
|
||||
script: USE_CONTAINER=true make dco fmt vet test-short test-long coverage-send
|
||||
|
||||
19
CHANGELOG.md
19
CHANGELOG.md
@@ -1,6 +1,25 @@
|
||||
Changelog
|
||||
==========
|
||||
|
||||
# 0.4.0 (2015-08-11)
|
||||
|
||||
## Updates
|
||||
- HTTP Proxy support for Docker Engine
|
||||
- RedHat distros now use Docker Yum repositories
|
||||
- Ability to set environment variables in the Docker Engine
|
||||
- Internal libmachine updates for stability
|
||||
|
||||
## Drivers
|
||||
- Google:
|
||||
- Preemptiple instances
|
||||
- Static IP support
|
||||
|
||||
## Fixes
|
||||
- Swarm Discovery Flag is verified
|
||||
- Timeout added to `ls` command to prevent hangups
|
||||
- SSH command failure now reports information about error
|
||||
- Configuration migration updates
|
||||
|
||||
# 0.3.0 (2015-06-18)
|
||||
|
||||
## Features
|
||||
|
||||
182
CONTRIBUTING.md
182
CONTRIBUTING.md
@@ -2,6 +2,7 @@
|
||||
|
||||
[](https://godoc.org/github.com/docker/machine)
|
||||
[](https://travis-ci.org/docker/machine)
|
||||
[](https://coveralls.io/github/docker/machine?branch=upstream-master)
|
||||
|
||||
Want to hack on Machine? Awesome! Here are instructions to get you
|
||||
started.
|
||||
@@ -10,106 +11,158 @@ Machine is a part of the [Docker](https://www.docker.com) project, and follows
|
||||
the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read
|
||||
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
|
||||
Otherwise, please read [Docker's contributions
|
||||
guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md).
|
||||
|
||||
# Building
|
||||
|
||||
The requirements to build Machine are:
|
||||
|
||||
1. A running instance of Docker
|
||||
1. A running instance of Docker or a Golang 1.5 development environment
|
||||
2. The `bash` shell
|
||||
3. [Make](https://www.gnu.org/software/make/)
|
||||
|
||||
To build, run:
|
||||
## Build using Docker containers
|
||||
|
||||
$ script/build
|
||||
To build the `docker-machine` binary using containers, simply run:
|
||||
|
||||
From the Machine repository's root. Machine will run the build inside of a
|
||||
Docker container and the compiled binaries will appear in the project directory
|
||||
on the host.
|
||||
$ export USE_CONTAINER=true
|
||||
$ make build
|
||||
|
||||
By default, Machine will run a build which cross-compiles binaries for a variety
|
||||
of architectures and operating systems. If you know that you are only compiling
|
||||
for a particular architecture and/or operating system, you can speed up
|
||||
compilation by overriding the default argument that the build script passes
|
||||
to [gox](https://github.com/mitchellh/gox). This is very useful if you want
|
||||
to iterate quickly on a new feature, bug fix, etc.
|
||||
## Local Go 1.5 development environment
|
||||
|
||||
For instance, if you only want to compile for use on OS X with the x86_64 arch,
|
||||
run:
|
||||
Make sure the source code directory is under a correct directory structure to use Go 1.5 vendoring;
|
||||
example of cloning and preparing the correct environment `GOPATH`:
|
||||
```
|
||||
mkdir docker-machine
|
||||
cd docker-machine
|
||||
export GOPATH="$PWD"
|
||||
go get github.com/docker/machine
|
||||
cd docker-machine/src/github.com/docker/machine
|
||||
```
|
||||
|
||||
$ script/build -osarch="darwin/amd64"
|
||||
At this point, simply run:
|
||||
|
||||
If you don't need to run the `docker build` to generate the image on each
|
||||
compile, i.e. if you have built the image already, you can skip the image build
|
||||
using the `SKIP_BUILD` environment variable, for instance:
|
||||
$ make build
|
||||
|
||||
$ SKIP_BUILD=1 script/build -osarch="darwin/amd64"
|
||||
## Built binary
|
||||
|
||||
If you have any questions we're in #docker-machine on Freenode.
|
||||
After the build is complete a `bin/docker-machine` binary will be created.
|
||||
|
||||
## Unit Tests
|
||||
You may call:
|
||||
|
||||
To run the unit tests for the whole project, using the following script:
|
||||
$ make clean
|
||||
|
||||
$ script/test
|
||||
to clean-up build results.
|
||||
|
||||
This will run the unit tests inside of a container, so you don't have to worry
|
||||
about configuring your environment properly before doing so.
|
||||
## Tests and validation
|
||||
|
||||
To run the unit tests for only a specific subdirectory of the project, you can
|
||||
pass an argument to that script to specify which directory, e.g.:
|
||||
To run basic validation (dco, fmt), and the project unit tests, call:
|
||||
|
||||
$ script/test ./drivers/amazonec2
|
||||
$ make test
|
||||
|
||||
If you want more indepth validation (vet, lint), and all tests with race detection, call:
|
||||
|
||||
$ make validate
|
||||
|
||||
If you make a pull request, it is highly encouraged that you submit tests for
|
||||
the code that you have added or modified in the same pull request.
|
||||
|
||||
## Code Coverage
|
||||
|
||||
Machine includes a script to check for missing `*_test.go` files and to generate
|
||||
an [HTML-based representation of which code is covered by tests](http://blog.golang.org/cover#TOC_5.).
|
||||
To generate an html code coverage report of the Machine codebase, run:
|
||||
|
||||
To run the code coverage script, execute:
|
||||
make coverage-serve
|
||||
|
||||
```console
|
||||
$ ./script/coverage serve
|
||||
```
|
||||
And navigate to http://localhost:8000 (hit `CTRL+C` to stop the server).
|
||||
|
||||
You will see the results of the code coverage check as they come in.
|
||||
### Native build
|
||||
|
||||
This will also generate the code coverage website and serve it from a container
|
||||
on port 8000. By default, `/` will show you the source files from the base
|
||||
directory, and you can navigate to the coverage for any particular subdirectory
|
||||
of the Docker Machine repo's root by going to that path. For instance, to see
|
||||
the coverage for the VirtualBox driver's package, browse to `/drivers/virtualbox`.
|
||||
Alternatively, if you are building natively, you can simply run:
|
||||
|
||||
make coverage-html
|
||||
|
||||
This will generate and open the report file:
|
||||
|
||||

|
||||
|
||||
You can hit `CTRL+C` to stop the server.
|
||||
## List of all targets
|
||||
|
||||
### High-level targets
|
||||
|
||||
make clean
|
||||
make build
|
||||
make test
|
||||
make validate
|
||||
|
||||
### Advanced build targets
|
||||
|
||||
Just build the machine binary itself (native):
|
||||
|
||||
make machine
|
||||
|
||||
Just build the plugins (native):
|
||||
|
||||
make plugins
|
||||
|
||||
Build for all supported oses and architectures (binaries will be in the `bin` project subfolder):
|
||||
|
||||
make cross
|
||||
|
||||
Build for a specific list of oses and architectures:
|
||||
|
||||
TARGET_OS=linux TARGET_ARCH="amd64 arm" make cross
|
||||
|
||||
You can further control build options through the following environment variables:
|
||||
|
||||
DEBUG=true # enable debug build
|
||||
STATIC=true # build static (note: when cross-compiling, the build is always static)
|
||||
VERBOSE=true # verbose output
|
||||
PREFIX=folder # put binaries in another folder (not the default `./bin`)
|
||||
|
||||
Scrub build results:
|
||||
|
||||
make build-clean
|
||||
|
||||
### Coverage targets
|
||||
|
||||
make coverage-html
|
||||
make coverage-serve
|
||||
make coverage-send
|
||||
make coverage-generate
|
||||
make coverage-clean
|
||||
|
||||
### Tests targets
|
||||
|
||||
make test-short
|
||||
make test-long
|
||||
make test-integration
|
||||
|
||||
### Validation targets
|
||||
|
||||
make fmt
|
||||
make vet
|
||||
make lint
|
||||
make dco
|
||||
|
||||
## Integration Tests
|
||||
|
||||
### Setup
|
||||
|
||||
We utilize [BATS](https://github.com/sstephenson/bats) for integration testing.
|
||||
This runs tests against the generated binary. To use, first make sure to
|
||||
[install BATS](https://github.com/sstephenson/bats). Then run `./script/build`
|
||||
to generate the binary for your system.
|
||||
We use [BATS](https://github.com/sstephenson/bats) for integration testing, so,
|
||||
first make sure to [install it](https://github.com/sstephenson/bats#installing-bats-from-source).
|
||||
|
||||
### Basic Usage
|
||||
|
||||
Once you have the binary, the integration tests can be invoked using the
|
||||
`test/integration/run-bats.sh` wrapper script.
|
||||
You first need to build, calling `make build`.
|
||||
|
||||
Using this wrapper script, you can invoke a test or subset of tests for a
|
||||
particular driver. To set the driver, use the `DRIVER` environment variable.
|
||||
You can then invoke integration tests calling `DRIVER=foo make test-integration TESTSUITE`, where `TESTSUITE` is
|
||||
one of the `test/integration` subfolder, and `foo` is the specific driver you want to test.
|
||||
|
||||
The following examples are all shown relative to the project's root directory,
|
||||
but you should be able to invoke them from any directory without issue.
|
||||
|
||||
To invoke just one test:
|
||||
Examples:
|
||||
|
||||
```console
|
||||
$ DRIVER=virtualbox ./test/integration/run-bats.sh test/integration/core/core-commands.bats
|
||||
$ DRIVER=virtualbox make test-integration test/integration/core/core-commands.bats
|
||||
✓ virtualbox: machine should not exist
|
||||
✓ virtualbox: create
|
||||
✓ virtualbox: ls
|
||||
@@ -133,26 +186,13 @@ Cleaning up machines...
|
||||
Successfully removed bats-virtualbox-test
|
||||
```
|
||||
|
||||
To invoke a shared test with a different driver:
|
||||
|
||||
```console
|
||||
$ DRIVER=digitalocean ./test/integration/run-bats.sh test/integration/core/core-commands.bats
|
||||
...
|
||||
```
|
||||
|
||||
To invoke a directory of tests recursively:
|
||||
|
||||
```console
|
||||
$ DRIVER=virtualbox ./test/integration/run-bats.sh test/integration/core/
|
||||
$ DRIVER=virtualbox make test-integration test/integration/core/
|
||||
...
|
||||
```
|
||||
|
||||
If you want to invoke a group of tests across two or more different drivers at
|
||||
once (e.g. every test in the `drivers` directory), at the time of writing there
|
||||
is no first-class support to do so - you will have to write your own wrapper
|
||||
scripts, bash loops, etc. However, in the future, this may gain first-class
|
||||
support as usage patterns become more clear.
|
||||
|
||||
### Extra Create Arguments
|
||||
|
||||
In some cases, for instance to test the creation of a specific base OS (e.g.
|
||||
@@ -170,7 +210,7 @@ $ DRIVER=amazonec2 \
|
||||
AWS_ACCESS_KEY_ID=zzzzzzzzzzzzzzzz \
|
||||
AWS_AMI=ami-12663b7a \
|
||||
AWS_SSH_USER=ec2-user \
|
||||
./test/integration/run-bats.sh test/integration/core
|
||||
make test-integration test/integration/core
|
||||
```
|
||||
|
||||
in order to run the core tests on Red Hat Enterprise Linux on Amazon.
|
||||
|
||||
15
Dockerfile
15
Dockerfile
@@ -1,15 +1,12 @@
|
||||
FROM golang:1.4.2-cross
|
||||
FROM golang:1.5.1
|
||||
|
||||
# TODO: Vendor these `go get` commands using Godep.
|
||||
RUN go get github.com/mitchellh/gox
|
||||
RUN go get github.com/aktau/github-release
|
||||
RUN go get github.com/tools/godep
|
||||
RUN go get golang.org/x/tools/cmd/cover
|
||||
RUN go get github.com/golang/lint/golint \
|
||||
github.com/mattn/goveralls \
|
||||
golang.org/x/tools/cover \
|
||||
github.com/tools/godep \
|
||||
github.com/aktau/github-release
|
||||
|
||||
ENV GOPATH /go/src/github.com/docker/machine/Godeps/_workspace:/go
|
||||
ENV MACHINE_BINARY /go/src/github.com/docker/machine/docker-machine
|
||||
ENV USER root
|
||||
|
||||
WORKDIR /go/src/github.com/docker/machine
|
||||
|
||||
ADD . /go/src/github.com/docker/machine
|
||||
|
||||
10
Godeps/Godeps.json
generated
10
Godeps/Godeps.json
generated
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"ImportPath": "github.com/docker/machine",
|
||||
"GoVersion": "go1.4.2",
|
||||
"GoVersion": "go1.5",
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "code.google.com/p/goauth2/oauth",
|
||||
@@ -158,6 +158,14 @@
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/googleapi",
|
||||
"Rev": "a09229c13c2f13bbdedf7b31b506cad4c83ef3bf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2",
|
||||
"Rev": "038cb4adce85ed41e285c2e7cc6221a92bfa44aa"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/cloud/compute",
|
||||
"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
14
Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
generated
vendored
Normal file
14
Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
|
||||
install:
|
||||
- export GOPATH="$HOME/gopath"
|
||||
- mkdir -p "$GOPATH/src/golang.org/x"
|
||||
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
|
||||
- go get -v -t -d golang.org/x/oauth2/...
|
||||
|
||||
script:
|
||||
- go test -v golang.org/x/oauth2/...
|
||||
3
Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
generated
vendored
Normal file
3
Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
||||
31
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
generated
vendored
Normal file
31
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# Contributing to Go
|
||||
|
||||
Go is an open source project.
|
||||
|
||||
It is the work of hundreds of contributors. We appreciate your help!
|
||||
|
||||
|
||||
## Filing issues
|
||||
|
||||
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
2. What operating system and processor architecture are you using?
|
||||
3. What did you do?
|
||||
4. What did you expect to see?
|
||||
5. What did you see instead?
|
||||
|
||||
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
|
||||
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
|
||||
|
||||
## Contributing code
|
||||
|
||||
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
|
||||
before sending patches.
|
||||
|
||||
**We do not accept GitHub pull requests**
|
||||
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
|
||||
|
||||
Unless otherwise noted, the Go source files are distributed under
|
||||
the BSD-style license found in the LICENSE file.
|
||||
|
||||
3
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
generated
vendored
Normal file
3
Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
||||
27
Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
generated
vendored
Normal file
27
Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The oauth2 Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
64
Godeps/_workspace/src/golang.org/x/oauth2/README.md
generated
vendored
Normal file
64
Godeps/_workspace/src/golang.org/x/oauth2/README.md
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
# OAuth2 for Go
|
||||
|
||||
[](https://travis-ci.org/golang/oauth2)
|
||||
|
||||
oauth2 package contains a client implementation for OAuth 2.0 spec.
|
||||
|
||||
## Installation
|
||||
|
||||
~~~~
|
||||
go get golang.org/x/oauth2
|
||||
~~~~
|
||||
|
||||
See godoc for further documentation and examples.
|
||||
|
||||
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
|
||||
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
|
||||
|
||||
|
||||
## App Engine
|
||||
|
||||
In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
|
||||
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
|
||||
the `golang.org/x/net/context` package
|
||||
|
||||
This means its no longer possible to use the "Classic App Engine"
|
||||
`appengine.Context` type with the `oauth2` package. (You're using
|
||||
Classic App Engine if you import the package `"appengine"`.)
|
||||
|
||||
To work around this, you may use the new `"google.golang.org/appengine"`
|
||||
package. This package has almost the same API as the `"appengine"` package,
|
||||
but it can be fetched with `go get` and used on "Managed VMs" and well as
|
||||
Classic App Engine.
|
||||
|
||||
See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
|
||||
for information on updating your app.
|
||||
|
||||
If you don't want to update your entire app to use the new App Engine packages,
|
||||
you may use both sets of packages in parallel, using only the new packages
|
||||
with the `oauth2` package.
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
newappengine "google.golang.org/appengine"
|
||||
newurlfetch "google.golang.org/appengine/urlfetch"
|
||||
|
||||
"appengine"
|
||||
)
|
||||
|
||||
func handler(w http.ResponseWriter, r *http.Request) {
|
||||
var c appengine.Context = appengine.NewContext(r)
|
||||
c.Infof("Logging a message with the old package")
|
||||
|
||||
var ctx context.Context = newappengine.NewContext(r)
|
||||
client := &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
Source: google.AppEngineTokenSource(ctx, "scope"),
|
||||
Base: &newurlfetch.Transport{Context: ctx},
|
||||
},
|
||||
}
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/bitbucket/bitbucket.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/bitbucket/bitbucket.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2015 The oauth2 Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bitbucket provides constants for using OAuth2 to access Bitbucket.
|
||||
package bitbucket
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Endpoint is Bitbucket's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://bitbucket.org/site/oauth2/authorize",
|
||||
TokenURL: "https://bitbucket.org/site/oauth2/access_token",
|
||||
}
|
||||
25
Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
generated
vendored
Normal file
25
Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
// App Engine hooks.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"google.golang.org/appengine/urlfetch"
|
||||
)
|
||||
|
||||
func init() {
|
||||
internal.RegisterContextClientFunc(contextClientAppEngine)
|
||||
}
|
||||
|
||||
func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
|
||||
return urlfetch.Client(ctx), nil
|
||||
}
|
||||
112
Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go
generated
vendored
Normal file
112
Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
|
||||
// also known as the "two-legged OAuth 2.0".
|
||||
//
|
||||
// This should be used when the client is acting on its own behalf or when the client
|
||||
// is the resource owner. It may also be used when requesting access to protected
|
||||
// resources based on an authorization previously arranged with the authorization
|
||||
// server.
|
||||
//
|
||||
// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4
|
||||
package clientcredentials // import "golang.org/x/oauth2/clientcredentials"
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// tokenFromInternal maps an *internal.Token struct into
|
||||
// an *oauth2.Token struct.
|
||||
func tokenFromInternal(t *internal.Token) *oauth2.Token {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
tk := &oauth2.Token{
|
||||
AccessToken: t.AccessToken,
|
||||
TokenType: t.TokenType,
|
||||
RefreshToken: t.RefreshToken,
|
||||
Expiry: t.Expiry,
|
||||
}
|
||||
return tk.WithExtra(t.Raw)
|
||||
}
|
||||
|
||||
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
|
||||
// This token is then mapped from *internal.Token into an *oauth2.Token which is
|
||||
// returned along with an error.
|
||||
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {
|
||||
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tokenFromInternal(tk), nil
|
||||
}
|
||||
|
||||
// Client Credentials Config describes a 2-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
||||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// TokenURL is the resource server's token endpoint
|
||||
// URL. This is a constant specific to each server.
|
||||
TokenURL string
|
||||
|
||||
// Scope specifies optional requested permissions.
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// Token uses client credentials to retrieve a token.
|
||||
// The HTTP client to use is derived from the context.
|
||||
// If nil, http.DefaultClient is used.
|
||||
func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
|
||||
return retrieveToken(ctx, c, url.Values{
|
||||
"grant_type": {"client_credentials"},
|
||||
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
|
||||
})
|
||||
}
|
||||
|
||||
// Client returns an HTTP client using the provided token.
|
||||
// The token will auto-refresh as necessary. The underlying
|
||||
// HTTP transport will be obtained using the provided context.
|
||||
// The returned client and its Transport should not be modified.
|
||||
func (c *Config) Client(ctx context.Context) *http.Client {
|
||||
return oauth2.NewClient(ctx, c.TokenSource(ctx))
|
||||
}
|
||||
|
||||
// TokenSource returns a TokenSource that returns t until t expires,
|
||||
// automatically refreshing it as necessary using the provided context and the
|
||||
// client ID and client secret.
|
||||
//
|
||||
// Most users will use Config.Client instead.
|
||||
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
|
||||
source := &tokenSource{
|
||||
ctx: ctx,
|
||||
conf: c,
|
||||
}
|
||||
return oauth2.ReuseTokenSource(nil, source)
|
||||
}
|
||||
|
||||
type tokenSource struct {
|
||||
ctx context.Context
|
||||
conf *Config
|
||||
}
|
||||
|
||||
// Token refreshes the token by using a new client credentials request.
|
||||
// tokens received this way do not include a refresh token
|
||||
func (c *tokenSource) Token() (*oauth2.Token, error) {
|
||||
return retrieveToken(c.ctx, c.conf, url.Values{
|
||||
"grant_type": {"client_credentials"},
|
||||
"scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")),
|
||||
})
|
||||
}
|
||||
96
Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
generated
vendored
Normal file
96
Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package clientcredentials
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
func newConf(url string) *Config {
|
||||
return &Config{
|
||||
ClientID: "CLIENT_ID",
|
||||
ClientSecret: "CLIENT_SECRET",
|
||||
Scopes: []string{"scope1", "scope2"},
|
||||
TokenURL: url + "/token",
|
||||
}
|
||||
}
|
||||
|
||||
type mockTransport struct {
|
||||
rt func(req *http.Request) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
return t.rt(req)
|
||||
}
|
||||
|
||||
func TestTokenRequest(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.String() != "/token" {
|
||||
t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token")
|
||||
}
|
||||
headerAuth := r.Header.Get("Authorization")
|
||||
if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
|
||||
t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
|
||||
}
|
||||
if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want {
|
||||
t.Errorf("Content-Type header = %q; want %q", got, want)
|
||||
}
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
r.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("failed reading request body: %s.", err)
|
||||
}
|
||||
if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
|
||||
t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2")
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer"))
|
||||
}))
|
||||
defer ts.Close()
|
||||
conf := newConf(ts.URL)
|
||||
tok, err := conf.Token(oauth2.NoContext)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !tok.Valid() {
|
||||
t.Fatalf("token invalid. got: %#v", tok)
|
||||
}
|
||||
if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
|
||||
t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c")
|
||||
}
|
||||
if tok.TokenType != "bearer" {
|
||||
t.Errorf("token type = %q; want %q", tok.TokenType, "bearer")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTokenRefreshRequest(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.String() == "/somethingelse" {
|
||||
return
|
||||
}
|
||||
if r.URL.String() != "/token" {
|
||||
t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
|
||||
}
|
||||
headerContentType := r.Header.Get("Content-Type")
|
||||
if headerContentType != "application/x-www-form-urlencoded" {
|
||||
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
|
||||
}
|
||||
body, _ := ioutil.ReadAll(r.Body)
|
||||
if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
|
||||
t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
|
||||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
conf := newConf(ts.URL)
|
||||
c := conf.Client(oauth2.NoContext)
|
||||
c.Get(ts.URL + "/somethingelse")
|
||||
}
|
||||
45
Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
generated
vendored
Normal file
45
Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
func ExampleConfig() {
|
||||
conf := &oauth2.Config{
|
||||
ClientID: "YOUR_CLIENT_ID",
|
||||
ClientSecret: "YOUR_CLIENT_SECRET",
|
||||
Scopes: []string{"SCOPE1", "SCOPE2"},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://provider.com/o/oauth2/auth",
|
||||
TokenURL: "https://provider.com/o/oauth2/token",
|
||||
},
|
||||
}
|
||||
|
||||
// Redirect user to consent page to ask for permission
|
||||
// for the scopes specified above.
|
||||
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
|
||||
fmt.Printf("Visit the URL for the auth dialog: %v", url)
|
||||
|
||||
// Use the authorization code that is pushed to the redirect URL.
|
||||
// NewTransportWithCode will do the handshake to retrieve
|
||||
// an access token and initiate a Transport that is
|
||||
// authorized and authenticated by the retrieved token.
|
||||
var code string
|
||||
if _, err := fmt.Scan(&code); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
tok, err := conf.Exchange(oauth2.NoContext, code)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
client := conf.Client(oauth2.NoContext, tok)
|
||||
client.Get("...")
|
||||
}
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package facebook provides constants for using OAuth2 to access Facebook.
|
||||
package facebook // import "golang.org/x/oauth2/facebook"
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Endpoint is Facebook's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://www.facebook.com/dialog/oauth",
|
||||
TokenURL: "https://graph.facebook.com/oauth/access_token",
|
||||
}
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package github provides constants for using OAuth2 to access Github.
|
||||
package github // import "golang.org/x/oauth2/github"
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Endpoint is Github's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://github.com/login/oauth/authorize",
|
||||
TokenURL: "https://github.com/login/oauth/access_token",
|
||||
}
|
||||
86
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
generated
vendored
Normal file
86
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs.
|
||||
var appengineVM bool
|
||||
|
||||
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
|
||||
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
|
||||
|
||||
// AppEngineTokenSource returns a token source that fetches tokens
|
||||
// issued to the current App Engine application's service account.
|
||||
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
|
||||
// that involves user accounts, see oauth2.Config instead.
|
||||
//
|
||||
// The provided context must have come from appengine.NewContext.
|
||||
func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
|
||||
if appengineTokenFunc == nil {
|
||||
panic("google: AppEngineTokenSource can only be used on App Engine.")
|
||||
}
|
||||
scopes := append([]string{}, scope...)
|
||||
sort.Strings(scopes)
|
||||
return &appEngineTokenSource{
|
||||
ctx: ctx,
|
||||
scopes: scopes,
|
||||
key: strings.Join(scopes, " "),
|
||||
}
|
||||
}
|
||||
|
||||
// aeTokens helps the fetched tokens to be reused until their expiration.
|
||||
var (
|
||||
aeTokensMu sync.Mutex
|
||||
aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
|
||||
)
|
||||
|
||||
type tokenLock struct {
|
||||
mu sync.Mutex // guards t; held while fetching or updating t
|
||||
t *oauth2.Token
|
||||
}
|
||||
|
||||
type appEngineTokenSource struct {
|
||||
ctx context.Context
|
||||
scopes []string
|
||||
key string // to aeTokens map; space-separated scopes
|
||||
}
|
||||
|
||||
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
|
||||
if appengineTokenFunc == nil {
|
||||
panic("google: AppEngineTokenSource can only be used on App Engine.")
|
||||
}
|
||||
|
||||
aeTokensMu.Lock()
|
||||
tok, ok := aeTokens[ts.key]
|
||||
if !ok {
|
||||
tok = &tokenLock{}
|
||||
aeTokens[ts.key] = tok
|
||||
}
|
||||
aeTokensMu.Unlock()
|
||||
|
||||
tok.mu.Lock()
|
||||
defer tok.mu.Unlock()
|
||||
if tok.t.Valid() {
|
||||
return tok.t, nil
|
||||
}
|
||||
access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tok.t = &oauth2.Token{
|
||||
AccessToken: access,
|
||||
Expiry: exp,
|
||||
}
|
||||
return tok.t, nil
|
||||
}
|
||||
13
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
generated
vendored
Normal file
13
Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package google
|
||||
|
||||
import "google.golang.org/appengine"
|
||||
|
||||
func init() {
|
||||
appengineTokenFunc = appengine.AccessToken
|
||||
}
|
||||
14
Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm_hook.go
generated
vendored
Normal file
14
Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm_hook.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2015 The oauth2 Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appenginevm
|
||||
|
||||
package google
|
||||
|
||||
import "google.golang.org/appengine"
|
||||
|
||||
func init() {
|
||||
appengineVM = true
|
||||
appengineTokenFunc = appengine.AccessToken
|
||||
}
|
||||
155
Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
generated
vendored
Normal file
155
Godeps/_workspace/src/golang.org/x/oauth2/google/default.go
generated
vendored
Normal file
@@ -0,0 +1,155 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/cloud/compute/metadata"
|
||||
)
|
||||
|
||||
// DefaultClient returns an HTTP Client that uses the
|
||||
// DefaultTokenSource to obtain authentication credentials.
|
||||
//
|
||||
// This client should be used when developing services
|
||||
// that run on Google App Engine or Google Compute Engine
|
||||
// and use "Application Default Credentials."
|
||||
//
|
||||
// For more details, see:
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials
|
||||
//
|
||||
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
|
||||
ts, err := DefaultTokenSource(ctx, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return oauth2.NewClient(ctx, ts), nil
|
||||
}
|
||||
|
||||
// DefaultTokenSource is a token source that uses
|
||||
// "Application Default Credentials".
|
||||
//
|
||||
// It looks for credentials in the following places,
|
||||
// preferring the first location found:
|
||||
//
|
||||
// 1. A JSON file whose path is specified by the
|
||||
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
// 2. A JSON file in a location known to the gcloud command-line tool.
|
||||
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
|
||||
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
|
||||
// 3. On Google App Engine it uses the appengine.AccessToken function.
|
||||
// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
|
||||
// credentials from the metadata server.
|
||||
// (In this final case any provided scopes are ignored.)
|
||||
//
|
||||
// For more details, see:
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials
|
||||
//
|
||||
func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
|
||||
// First, try the environment variable.
|
||||
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
|
||||
if filename := os.Getenv(envVar); filename != "" {
|
||||
ts, err := tokenSourceFromFile(ctx, filename, scope)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
|
||||
}
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// Second, try a well-known file.
|
||||
filename := wellKnownFile()
|
||||
_, err := os.Stat(filename)
|
||||
if err == nil {
|
||||
ts, err2 := tokenSourceFromFile(ctx, filename, scope)
|
||||
if err2 == nil {
|
||||
return ts, nil
|
||||
}
|
||||
err = err2
|
||||
} else if os.IsNotExist(err) {
|
||||
err = nil // ignore this error
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
|
||||
}
|
||||
|
||||
// Third, if we're on Google App Engine use those credentials.
|
||||
if appengineTokenFunc != nil && !appengineVM {
|
||||
return AppEngineTokenSource(ctx, scope...), nil
|
||||
}
|
||||
|
||||
// Fourth, if we're on Google Compute Engine use the metadata server.
|
||||
if metadata.OnGCE() {
|
||||
return ComputeTokenSource(""), nil
|
||||
}
|
||||
|
||||
// None are found; return helpful error.
|
||||
const url = "https://developers.google.com/accounts/docs/application-default-credentials"
|
||||
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
|
||||
}
|
||||
|
||||
func wellKnownFile() string {
|
||||
const f = "application_default_credentials.json"
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
|
||||
}
|
||||
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
|
||||
}
|
||||
|
||||
func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var d struct {
|
||||
// Common fields
|
||||
Type string
|
||||
ClientID string `json:"client_id"`
|
||||
|
||||
// User Credential fields
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
|
||||
// Service Account fields
|
||||
ClientEmail string `json:"client_email"`
|
||||
PrivateKeyID string `json:"private_key_id"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
if err := json.Unmarshal(b, &d); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch d.Type {
|
||||
case "authorized_user":
|
||||
cfg := &oauth2.Config{
|
||||
ClientID: d.ClientID,
|
||||
ClientSecret: d.ClientSecret,
|
||||
Scopes: append([]string{}, scopes...), // copy
|
||||
Endpoint: Endpoint,
|
||||
}
|
||||
tok := &oauth2.Token{RefreshToken: d.RefreshToken}
|
||||
return cfg.TokenSource(ctx, tok), nil
|
||||
case "service_account":
|
||||
cfg := &jwt.Config{
|
||||
Email: d.ClientEmail,
|
||||
PrivateKey: []byte(d.PrivateKey),
|
||||
Scopes: append([]string{}, scopes...), // copy
|
||||
TokenURL: JWTTokenURL,
|
||||
}
|
||||
return cfg.TokenSource(ctx), nil
|
||||
case "":
|
||||
return nil, errors.New("missing 'type' field in credentials")
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown credential type: %q", d.Type)
|
||||
}
|
||||
}
|
||||
150
Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
generated
vendored
Normal file
150
Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appenginevm !appengine
|
||||
|
||||
package google_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/appengine"
|
||||
"google.golang.org/appengine/urlfetch"
|
||||
)
|
||||
|
||||
func ExampleDefaultClient() {
|
||||
client, err := google.DefaultClient(oauth2.NoContext,
|
||||
"https://www.googleapis.com/auth/devstorage.full_control")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func Example_webServer() {
|
||||
// Your credentials should be obtained from the Google
|
||||
// Developer Console (https://console.developers.google.com).
|
||||
conf := &oauth2.Config{
|
||||
ClientID: "YOUR_CLIENT_ID",
|
||||
ClientSecret: "YOUR_CLIENT_SECRET",
|
||||
RedirectURL: "YOUR_REDIRECT_URL",
|
||||
Scopes: []string{
|
||||
"https://www.googleapis.com/auth/bigquery",
|
||||
"https://www.googleapis.com/auth/blogger",
|
||||
},
|
||||
Endpoint: google.Endpoint,
|
||||
}
|
||||
// Redirect user to Google's consent page to ask for permission
|
||||
// for the scopes specified above.
|
||||
url := conf.AuthCodeURL("state")
|
||||
fmt.Printf("Visit the URL for the auth dialog: %v", url)
|
||||
|
||||
// Handle the exchange code to initiate a transport.
|
||||
tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
client := conf.Client(oauth2.NoContext, tok)
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func ExampleJWTConfigFromJSON() {
|
||||
// Your credentials should be obtained from the Google
|
||||
// Developer Console (https://console.developers.google.com).
|
||||
// Navigate to your project, then see the "Credentials" page
|
||||
// under "APIs & Auth".
|
||||
// To create a service account client, click "Create new Client ID",
|
||||
// select "Service Account", and click "Create Client ID". A JSON
|
||||
// key file will then be downloaded to your computer.
|
||||
data, err := ioutil.ReadFile("/path/to/your-project-key.json")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Initiate an http.Client. The following GET request will be
|
||||
// authorized and authenticated on the behalf of
|
||||
// your service account.
|
||||
client := conf.Client(oauth2.NoContext)
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func ExampleSDKConfig() {
|
||||
// The credentials will be obtained from the first account that
|
||||
// has been authorized with `gcloud auth login`.
|
||||
conf, err := google.NewSDKConfig("")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// Initiate an http.Client. The following GET request will be
|
||||
// authorized and authenticated on the behalf of the SDK user.
|
||||
client := conf.Client(oauth2.NoContext)
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func Example_serviceAccount() {
|
||||
// Your credentials should be obtained from the Google
|
||||
// Developer Console (https://console.developers.google.com).
|
||||
conf := &jwt.Config{
|
||||
Email: "xxx@developer.gserviceaccount.com",
|
||||
// The contents of your RSA private key or your PEM file
|
||||
// that contains a private key.
|
||||
// If you have a p12 file instead, you
|
||||
// can use `openssl` to export the private key into a pem file.
|
||||
//
|
||||
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
|
||||
//
|
||||
// The field only supports PEM containers with no passphrase.
|
||||
// The openssl command will convert p12 keys to passphrase-less PEM containers.
|
||||
PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
|
||||
Scopes: []string{
|
||||
"https://www.googleapis.com/auth/bigquery",
|
||||
"https://www.googleapis.com/auth/blogger",
|
||||
},
|
||||
TokenURL: google.JWTTokenURL,
|
||||
// If you would like to impersonate a user, you can
|
||||
// create a transport with a subject. The following GET
|
||||
// request will be made on the behalf of user@example.com.
|
||||
// Optional.
|
||||
Subject: "user@example.com",
|
||||
}
|
||||
// Initiate an http.Client, the following GET request will be
|
||||
// authorized and authenticated on the behalf of user@example.com.
|
||||
client := conf.Client(oauth2.NoContext)
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func ExampleAppEngineTokenSource() {
|
||||
var req *http.Request // from the ServeHTTP handler
|
||||
ctx := appengine.NewContext(req)
|
||||
client := &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
|
||||
Base: &urlfetch.Transport{
|
||||
Context: ctx,
|
||||
},
|
||||
},
|
||||
}
|
||||
client.Get("...")
|
||||
}
|
||||
|
||||
func ExampleComputeTokenSource() {
|
||||
client := &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
// Fetch from Google Compute Engine's metadata server to retrieve
|
||||
// an access token for the provided account.
|
||||
// If no account is specified, "default" is used.
|
||||
Source: google.ComputeTokenSource(""),
|
||||
},
|
||||
}
|
||||
client.Get("...")
|
||||
}
|
||||
145
Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
generated
vendored
Normal file
145
Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package google provides support for making OAuth2 authorized and
|
||||
// authenticated HTTP requests to Google APIs.
|
||||
// It supports the Web server flow, client-side credentials, service accounts,
|
||||
// Google Compute Engine service accounts, and Google App Engine service
|
||||
// accounts.
|
||||
//
|
||||
// For more information, please read
|
||||
// https://developers.google.com/accounts/docs/OAuth2
|
||||
// and
|
||||
// https://developers.google.com/accounts/docs/application-default-credentials.
|
||||
package google // import "golang.org/x/oauth2/google"
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/cloud/compute/metadata"
|
||||
)
|
||||
|
||||
// Endpoint is Google's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://accounts.google.com/o/oauth2/auth",
|
||||
TokenURL: "https://accounts.google.com/o/oauth2/token",
|
||||
}
|
||||
|
||||
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
|
||||
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
|
||||
|
||||
// ConfigFromJSON uses a Google Developers Console client_credentials.json
|
||||
// file to construct a config.
|
||||
// client_credentials.json can be downloadable from https://console.developers.google.com,
|
||||
// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
|
||||
// JSON format and provide the contents of the file as jsonKey.
|
||||
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
|
||||
type cred struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
RedirectURIs []string `json:"redirect_uris"`
|
||||
AuthURI string `json:"auth_uri"`
|
||||
TokenURI string `json:"token_uri"`
|
||||
}
|
||||
var j struct {
|
||||
Web *cred `json:"web"`
|
||||
Installed *cred `json:"installed"`
|
||||
}
|
||||
if err := json.Unmarshal(jsonKey, &j); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var c *cred
|
||||
switch {
|
||||
case j.Web != nil:
|
||||
c = j.Web
|
||||
case j.Installed != nil:
|
||||
c = j.Installed
|
||||
default:
|
||||
return nil, fmt.Errorf("oauth2/google: no credentials found")
|
||||
}
|
||||
if len(c.RedirectURIs) < 1 {
|
||||
return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
|
||||
}
|
||||
return &oauth2.Config{
|
||||
ClientID: c.ClientID,
|
||||
ClientSecret: c.ClientSecret,
|
||||
RedirectURL: c.RedirectURIs[0],
|
||||
Scopes: scope,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: c.AuthURI,
|
||||
TokenURL: c.TokenURI,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
|
||||
// the credentials that authorize and authenticate the requests.
|
||||
// Create a service account on "Credentials" page under "APIs & Auth" for your
|
||||
// project at https://console.developers.google.com to download a JSON key file.
|
||||
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
|
||||
var key struct {
|
||||
Email string `json:"client_email"`
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
if err := json.Unmarshal(jsonKey, &key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &jwt.Config{
|
||||
Email: key.Email,
|
||||
PrivateKey: []byte(key.PrivateKey),
|
||||
Scopes: scope,
|
||||
TokenURL: JWTTokenURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ComputeTokenSource returns a token source that fetches access tokens
|
||||
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
|
||||
// this token source if your program is running on a GCE instance.
|
||||
// If no account is specified, "default" is used.
|
||||
// Further information about retrieving access tokens from the GCE metadata
|
||||
// server can be found at https://cloud.google.com/compute/docs/authentication.
|
||||
func ComputeTokenSource(account string) oauth2.TokenSource {
|
||||
return oauth2.ReuseTokenSource(nil, computeSource{account: account})
|
||||
}
|
||||
|
||||
type computeSource struct {
|
||||
account string
|
||||
}
|
||||
|
||||
func (cs computeSource) Token() (*oauth2.Token, error) {
|
||||
if !metadata.OnGCE() {
|
||||
return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
|
||||
}
|
||||
acct := cs.account
|
||||
if acct == "" {
|
||||
acct = "default"
|
||||
}
|
||||
tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresInSec int `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
}
|
||||
err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
|
||||
}
|
||||
if res.ExpiresInSec == 0 || res.AccessToken == "" {
|
||||
return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
|
||||
}
|
||||
return &oauth2.Token{
|
||||
AccessToken: res.AccessToken,
|
||||
TokenType: res.TokenType,
|
||||
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
|
||||
}, nil
|
||||
}
|
||||
67
Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
generated
vendored
Normal file
67
Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var webJSONKey = []byte(`
|
||||
{
|
||||
"web": {
|
||||
"auth_uri": "https://google.com/o/oauth2/auth",
|
||||
"client_secret": "3Oknc4jS_wA2r9i",
|
||||
"token_uri": "https://google.com/o/oauth2/token",
|
||||
"client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
|
||||
"redirect_uris": ["https://www.example.com/oauth2callback"],
|
||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
|
||||
"client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
|
||||
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
||||
"javascript_origins": ["https://www.example.com"]
|
||||
}
|
||||
}`)
|
||||
|
||||
// installedJSONKey is a sample credentials JSON blob of the "installed"
// application type; consumed by TestConfigFromJSON_Installed below.
var installedJSONKey = []byte(`{
"installed": {
"client_id": "222-installed.apps.googleusercontent.com",
"redirect_uris": ["https://www.example.com/oauth2callback"]
}
}`)
|
||||
|
||||
func TestConfigFromJSON(t *testing.T) {
|
||||
conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
|
||||
t.Errorf("ClientID = %q; want %q", got, want)
|
||||
}
|
||||
if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
|
||||
t.Errorf("ClientSecret = %q; want %q", got, want)
|
||||
}
|
||||
if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
|
||||
t.Errorf("RedictURL = %q; want %q", got, want)
|
||||
}
|
||||
if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
|
||||
t.Errorf("Scopes = %q; want %q", got, want)
|
||||
}
|
||||
if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
|
||||
t.Errorf("AuthURL = %q; want %q", got, want)
|
||||
}
|
||||
if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
|
||||
t.Errorf("TokenURL = %q; want %q", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigFromJSON_Installed(t *testing.T) {
|
||||
conf, err := ConfigFromJSON(installedJSONKey)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
|
||||
t.Errorf("ClientID = %q; want %q", got, want)
|
||||
}
|
||||
}
|
||||
71
Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go
generated
vendored
Normal file
71
Godeps/_workspace/src/golang.org/x/oauth2/google/jwt.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
|
||||
// key file to read the credentials that authorize and authenticate the
|
||||
// requests, and returns a TokenSource that does not use any OAuth2 flow but
|
||||
// instead creates a JWT and sends that as the access token.
|
||||
// The audience is typically a URL that specifies the scope of the credentials.
|
||||
//
|
||||
// Note that this is not a standard OAuth flow, but rather an
|
||||
// optimization supported by a few Google services.
|
||||
// Unless you know otherwise, you should use JWTConfigFromJSON instead.
|
||||
func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
|
||||
cfg, err := JWTConfigFromJSON(jsonKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
|
||||
}
|
||||
pk, err := internal.ParseKey(cfg.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not parse key: %v", err)
|
||||
}
|
||||
ts := &jwtAccessTokenSource{
|
||||
email: cfg.Email,
|
||||
audience: audience,
|
||||
pk: pk,
|
||||
}
|
||||
tok, err := ts.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return oauth2.ReuseTokenSource(tok, ts), nil
|
||||
}
|
||||
|
||||
// jwtAccessTokenSource implements oauth2.TokenSource by signing a JWT
// locally (see Token below) instead of performing a token exchange.
type jwtAccessTokenSource struct {
	email, audience string          // JWT issuer/subject (email) and audience claims
	pk              *rsa.PrivateKey // key used to sign the JWT with RS256
}
|
||||
|
||||
func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
|
||||
iat := time.Now()
|
||||
exp := iat.Add(time.Hour)
|
||||
cs := &jws.ClaimSet{
|
||||
Iss: ts.email,
|
||||
Sub: ts.email,
|
||||
Aud: ts.audience,
|
||||
Iat: iat.Unix(),
|
||||
Exp: exp.Unix(),
|
||||
}
|
||||
hdr := &jws.Header{
|
||||
Algorithm: "RS256",
|
||||
Typ: "JWT",
|
||||
}
|
||||
msg, err := jws.Encode(hdr, cs, ts.pk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("google: could not encode JWT: %v", err)
|
||||
}
|
||||
return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
|
||||
}
|
||||
168
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
generated
vendored
Normal file
168
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// sdkCredentials mirrors the on-disk JSON layout of the Google Cloud
// SDK's "credentials" file: a list of per-account entries, each pairing
// an OAuth2 credential with the account/scope key it belongs to.
type sdkCredentials struct {
	Data []struct {
		Credential struct {
			ClientID     string     `json:"client_id"`
			ClientSecret string     `json:"client_secret"`
			AccessToken  string     `json:"access_token"`
			RefreshToken string     `json:"refresh_token"`
			TokenExpiry  *time.Time `json:"token_expiry"` // nil when the file stores null (see testdata service-account entry)
		} `json:"credential"`
		Key struct {
			Account string `json:"account"` // account email this entry belongs to
			Scope   string `json:"scope"`   // space-separated OAuth2 scopes
		} `json:"key"`
	}
}
|
||||
|
||||
// An SDKConfig provides access to tokens from an account already
|
||||
// authorized via the Google Cloud SDK.
|
||||
type SDKConfig struct {
	conf         oauth2.Config // OAuth2 client settings read from the SDK credentials file
	initialToken *oauth2.Token // token stored in the credentials file; never written back after refresh
}
|
||||
|
||||
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
|
||||
// account. If account is empty, the account currently active in
|
||||
// Google Cloud SDK properties is used.
|
||||
// Google Cloud SDK credentials must be created by running `gcloud auth`
|
||||
// before using this function.
|
||||
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
|
||||
func NewSDKConfig(account string) (*SDKConfig, error) {
	configPath, err := sdkConfigPath()
	if err != nil {
		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
	}
	credentialsPath := filepath.Join(configPath, "credentials")
	f, err := os.Open(credentialsPath)
	if err != nil {
		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
	}
	defer f.Close()

	var c sdkCredentials
	if err := json.NewDecoder(f).Decode(&c); err != nil {
		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
	}
	if len(c.Data) == 0 {
		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
	}
	if account == "" {
		// No account requested: resolve the active one from the SDK's
		// "properties" INI file ([core] account = ...).
		propertiesPath := filepath.Join(configPath, "properties")
		f, err := os.Open(propertiesPath)
		if err != nil {
			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
		}
		defer f.Close()
		ini, err := internal.ParseINI(f)
		if err != nil {
			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
		}
		core, ok := ini["core"]
		if !ok {
			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
		}
		active, ok := core["account"]
		if !ok {
			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
		}
		account = active
	}

	// Find the credentials entry for the selected account.
	for _, d := range c.Data {
		if account == "" || d.Key.Account == account {
			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
			}
			var expiry time.Time
			if d.Credential.TokenExpiry != nil {
				expiry = *d.Credential.TokenExpiry
			}
			return &SDKConfig{
				conf: oauth2.Config{
					ClientID:     d.Credential.ClientID,
					ClientSecret: d.Credential.ClientSecret,
					Scopes:       strings.Split(d.Key.Scope, " "),
					Endpoint:     Endpoint,
					RedirectURL:  "oob",
				},
				initialToken: &oauth2.Token{
					AccessToken:  d.Credential.AccessToken,
					RefreshToken: d.Credential.RefreshToken,
					Expiry:       expiry,
				},
			}, nil
		}
	}
	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
}
|
||||
|
||||
// Client returns an HTTP client using Google Cloud SDK credentials to
|
||||
// authorize requests. The token will auto-refresh as necessary. The
|
||||
// underlying http.RoundTripper will be obtained using the provided
|
||||
// context. The returned client and its Transport should not be
|
||||
// modified.
|
||||
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
|
||||
return &http.Client{
|
||||
Transport: &oauth2.Transport{
|
||||
Source: c.TokenSource(ctx),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
// Google Cloud SDK credentials using the provided context.
// It returns the current access token stored in the credentials,
// and refreshes it when it expires, but it won't update the credentials
// with the new access token.
|
||||
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
	// Seed the standard oauth2 token source with the token that was read
	// from the SDK credentials file in NewSDKConfig.
	return c.conf.TokenSource(ctx, c.initialToken)
}
|
||||
|
||||
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
|
||||
func (c *SDKConfig) Scopes() []string {
	// Scopes were split from the space-separated "scope" key of the
	// selected credentials entry (see NewSDKConfig).
	return c.conf.Scopes
}
|
||||
|
||||
// sdkConfigPath tries to guess where the gcloud config is located.
|
||||
// It can be overridden during tests.
|
||||
var sdkConfigPath = func() (string, error) {
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
|
||||
}
|
||||
homeDir := guessUnixHomeDir()
|
||||
if homeDir == "" {
|
||||
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
|
||||
}
|
||||
return filepath.Join(homeDir, ".config", "gcloud"), nil
|
||||
}
|
||||
|
||||
// guessUnixHomeDir returns the current user's home directory, preferring
// the os/user database and falling back to $HOME.
func guessUnixHomeDir() string {
	if usr, err := user.Current(); err == nil {
		return usr.HomeDir
	}
	return os.Getenv("HOME")
}
|
||||
46
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
generated
vendored
Normal file
46
Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package google
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestSDKConfig(t *testing.T) {
|
||||
sdkConfigPath = func() (string, error) {
|
||||
return "testdata/gcloud", nil
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
account string
|
||||
accessToken string
|
||||
err bool
|
||||
}{
|
||||
{"", "bar_access_token", false},
|
||||
{"foo@example.com", "foo_access_token", false},
|
||||
{"bar@example.com", "bar_access_token", false},
|
||||
{"baz@serviceaccount.example.com", "", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
c, err := NewSDKConfig(tt.account)
|
||||
if got, want := err != nil, tt.err; got != want {
|
||||
if !tt.err {
|
||||
t.Errorf("expected no error, got error: %v", tt.err, err)
|
||||
} else {
|
||||
t.Errorf("expected error, got none")
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
tok := c.initialToken
|
||||
if tok == nil {
|
||||
t.Errorf("expected token %q, got: nil", tt.accessToken)
|
||||
continue
|
||||
}
|
||||
if tok.AccessToken != tt.accessToken {
|
||||
t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
122
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
generated
vendored
Normal file
122
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
{
|
||||
"data": [
|
||||
{
|
||||
"credential": {
|
||||
"_class": "OAuth2Credentials",
|
||||
"_module": "oauth2client.client",
|
||||
"access_token": "foo_access_token",
|
||||
"client_id": "foo_client_id",
|
||||
"client_secret": "foo_client_secret",
|
||||
"id_token": {
|
||||
"at_hash": "foo_at_hash",
|
||||
"aud": "foo_aud",
|
||||
"azp": "foo_azp",
|
||||
"cid": "foo_cid",
|
||||
"email": "foo@example.com",
|
||||
"email_verified": true,
|
||||
"exp": 1420573614,
|
||||
"iat": 1420569714,
|
||||
"id": "1337",
|
||||
"iss": "accounts.google.com",
|
||||
"sub": "1337",
|
||||
"token_hash": "foo_token_hash",
|
||||
"verified_email": true
|
||||
},
|
||||
"invalid": false,
|
||||
"refresh_token": "foo_refresh_token",
|
||||
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
|
||||
"token_expiry": "2015-01-09T00:51:51Z",
|
||||
"token_response": {
|
||||
"access_token": "foo_access_token",
|
||||
"expires_in": 3600,
|
||||
"id_token": "foo_id_token",
|
||||
"token_type": "Bearer"
|
||||
},
|
||||
"token_uri": "https://accounts.google.com/o/oauth2/token",
|
||||
"user_agent": "Cloud SDK Command Line Tool"
|
||||
},
|
||||
"key": {
|
||||
"account": "foo@example.com",
|
||||
"clientId": "foo_client_id",
|
||||
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
|
||||
"type": "google-cloud-sdk"
|
||||
}
|
||||
},
|
||||
{
|
||||
"credential": {
|
||||
"_class": "OAuth2Credentials",
|
||||
"_module": "oauth2client.client",
|
||||
"access_token": "bar_access_token",
|
||||
"client_id": "bar_client_id",
|
||||
"client_secret": "bar_client_secret",
|
||||
"id_token": {
|
||||
"at_hash": "bar_at_hash",
|
||||
"aud": "bar_aud",
|
||||
"azp": "bar_azp",
|
||||
"cid": "bar_cid",
|
||||
"email": "bar@example.com",
|
||||
"email_verified": true,
|
||||
"exp": 1420573614,
|
||||
"iat": 1420569714,
|
||||
"id": "1337",
|
||||
"iss": "accounts.google.com",
|
||||
"sub": "1337",
|
||||
"token_hash": "bar_token_hash",
|
||||
"verified_email": true
|
||||
},
|
||||
"invalid": false,
|
||||
"refresh_token": "bar_refresh_token",
|
||||
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
|
||||
"token_expiry": "2015-01-09T00:51:51Z",
|
||||
"token_response": {
|
||||
"access_token": "bar_access_token",
|
||||
"expires_in": 3600,
|
||||
"id_token": "bar_id_token",
|
||||
"token_type": "Bearer"
|
||||
},
|
||||
"token_uri": "https://accounts.google.com/o/oauth2/token",
|
||||
"user_agent": "Cloud SDK Command Line Tool"
|
||||
},
|
||||
"key": {
|
||||
"account": "bar@example.com",
|
||||
"clientId": "bar_client_id",
|
||||
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
|
||||
"type": "google-cloud-sdk"
|
||||
}
|
||||
},
|
||||
{
|
||||
"credential": {
|
||||
"_class": "ServiceAccountCredentials",
|
||||
"_kwargs": {},
|
||||
"_module": "oauth2client.client",
|
||||
"_private_key_id": "00000000000000000000000000000000",
|
||||
"_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n",
|
||||
"_revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
|
||||
"_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
|
||||
"_service_account_email": "baz@serviceaccount.example.com",
|
||||
"_service_account_id": "baz.serviceaccount.example.com",
|
||||
"_token_uri": "https://accounts.google.com/o/oauth2/token",
|
||||
"_user_agent": "Cloud SDK Command Line Tool",
|
||||
"access_token": null,
|
||||
"assertion_type": null,
|
||||
"client_id": null,
|
||||
"client_secret": null,
|
||||
"id_token": null,
|
||||
"invalid": false,
|
||||
"refresh_token": null,
|
||||
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
|
||||
"service_account_name": "baz@serviceaccount.example.com",
|
||||
"token_expiry": null,
|
||||
"token_response": null,
|
||||
"user_agent": "Cloud SDK Command Line Tool"
|
||||
},
|
||||
"key": {
|
||||
"account": "baz@serviceaccount.example.com",
|
||||
"clientId": "baz_client_id",
|
||||
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
|
||||
"type": "google-cloud-sdk"
|
||||
}
|
||||
}
|
||||
],
|
||||
"file_version": 1
|
||||
}
|
||||
2
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
generated
vendored
Normal file
2
Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
[core]
|
||||
account = bar@example.com
|
||||
76
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
generated
vendored
Normal file
76
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseKey converts the binary contents of a private key file
|
||||
// to an *rsa.PrivateKey. It detects whether the private key is in a
|
||||
// PEM container or not. If so, it extracts the the private key
|
||||
// from PEM container before conversion. It only supports PEM
|
||||
// containers with no passphrase.
|
||||
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
|
||||
block, _ := pem.Decode(key)
|
||||
if block != nil {
|
||||
key = block.Bytes
|
||||
}
|
||||
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
|
||||
if err != nil {
|
||||
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
|
||||
}
|
||||
}
|
||||
parsed, ok := parsedKey.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("private key is invalid")
|
||||
}
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
|
||||
result := map[string]map[string]string{
|
||||
"": map[string]string{}, // root section
|
||||
}
|
||||
scanner := bufio.NewScanner(ini)
|
||||
currentSection := ""
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(line, ";") {
|
||||
// comment.
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
|
||||
currentSection = strings.TrimSpace(line[1 : len(line)-1])
|
||||
result[currentSection] = map[string]string{}
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) == 2 && parts[0] != "" {
|
||||
result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error scanning ini: %v", err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// CondVal wraps v in a single-element slice, or returns nil when v is
// empty — handy for building optional url.Values entries.
func CondVal(v string) []string {
	if v != "" {
		return []string{v}
	}
	return nil
}
|
||||
62
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
generated
vendored
Normal file
62
Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestParseINI checks ParseINI against a root-level key, sections with
// multiple keys, empty sections and empty values, and malformed lines
// (which must be silently dropped).
func TestParseINI(t *testing.T) {
	tests := []struct {
		ini  string
		want map[string]map[string]string
	}{
		{
			`root = toor
[foo]
bar = hop
ini = nin
`,
			map[string]map[string]string{
				"":    map[string]string{"root": "toor"},
				"foo": map[string]string{"bar": "hop", "ini": "nin"},
			},
		},
		{
			`[empty]
[section]
empty=
`,
			map[string]map[string]string{
				"":        map[string]string{},
				"empty":   map[string]string{},
				"section": map[string]string{"empty": ""},
			},
		},
		{
			// Bare word, unterminated section header, missing key, and a
			// comment — all must be ignored.
			`ignore
[invalid
=stuff
;comment=true
`,
			map[string]map[string]string{
				"": map[string]string{},
			},
		},
	}
	for _, tt := range tests {
		result, err := ParseINI(strings.NewReader(tt.ini))
		if err != nil {
			t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
			continue
		}
		if !reflect.DeepEqual(result, tt.want) {
			t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
		}
	}
}
|
||||
214
Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
generated
vendored
Normal file
214
Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// This type is a mirror of oauth2.Token and exists to break
// an otherwise-circular dependency. Other internal packages
// should convert this Token into an oauth2.Token before use.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time

	// Raw optionally contains extra metadata from the server
	// when updating a token.
	Raw interface{}
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
// Use expiry() to convert the relative expiration fields into an
// absolute time.Time.
type tokenJSON struct {
	AccessToken  string         `json:"access_token"`
	TokenType    string         `json:"token_type"`
	RefreshToken string         `json:"refresh_token"`
	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
}
|
||||
|
||||
func (e *tokenJSON) expiry() (t time.Time) {
|
||||
if v := e.ExpiresIn; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
if v := e.Expires; v != 0 {
|
||||
return time.Now().Add(time.Duration(v) * time.Second)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type expirationTime int32
|
||||
|
||||
func (e *expirationTime) UnmarshalJSON(b []byte) error {
|
||||
var n json.Number
|
||||
err := json.Unmarshal(b, &n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
i, err := n.Int64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*e = expirationTime(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
// brokenAuthHeaderProviders lists token-URL prefixes of OAuth2 providers
// that do not accept client credentials via the HTTP Basic Authorization
// header; for these, the client secret is sent as a POST parameter
// instead (see providerAuthHeaderWorks and RetrieveToken).
var brokenAuthHeaderProviders = []string{
	"https://accounts.google.com/",
	"https://www.googleapis.com/",
	"https://api.instagram.com/",
	"https://www.douban.com/",
	"https://api.dropbox.com/",
	"https://api.soundcloud.com/",
	"https://www.linkedin.com/",
	"https://api.twitch.tv/",
	"https://oauth.vk.com/",
	"https://api.odnoklassniki.ru/",
	"https://connect.stripe.com/",
	"https://api.pushbullet.com/",
	"https://oauth.sandbox.trainingpeaks.com/",
	"https://oauth.trainingpeaks.com/",
	"https://www.strava.com/oauth/",
	"https://app.box.com/",
	"https://test-sandbox.auth.corp.google.com",
	"https://user.gini.net/",
	"https://api.netatmo.net/",
	"https://slack.com/",
}
|
||||
|
||||
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
|
||||
// implements the OAuth2 spec correctly
|
||||
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
|
||||
// In summary:
|
||||
// - Reddit only accepts client secret in the Authorization header
|
||||
// - Dropbox accepts either it in URL param or Auth header, but not both.
|
||||
// - Google only accepts URL param (not spec compliant?), not Auth header
|
||||
// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
|
||||
func providerAuthHeaderWorks(tokenURL string) bool {
|
||||
for _, s := range brokenAuthHeaderProviders {
|
||||
if strings.HasPrefix(tokenURL, s) {
|
||||
// Some sites fail to implement the OAuth2 spec fully.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Assume the provider implements the spec properly
|
||||
// otherwise. We can add more exceptions as they're
|
||||
// discovered. We will _not_ be adding configurable hooks
|
||||
// to this package to let users select server bugs.
|
||||
return true
|
||||
}
|
||||
|
||||
// RetrieveToken performs an OAuth2 token request: it POSTs the given
// url.Values to TokenURL using the HTTP client from ctx, and decodes the
// provider's response (JSON or form-encoded, by Content-Type) into a
// *Token. Client credentials are sent via HTTP Basic auth unless TokenURL
// matches brokenAuthHeaderProviders, in which case they are sent as POST
// parameters instead.
func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
	hc, err := ContextClient(ctx)
	if err != nil {
		return nil, err
	}
	v.Set("client_id", ClientID)
	bustedAuth := !providerAuthHeaderWorks(TokenURL)
	if bustedAuth && ClientSecret != "" {
		// Provider can't handle Basic auth: pass the secret in the body.
		v.Set("client_secret", ClientSecret)
	}
	req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if !bustedAuth {
		req.SetBasicAuth(ClientID, ClientSecret)
	}
	r, err := hc.Do(req)
	if err != nil {
		return nil, err
	}
	defer r.Body.Close()
	// Cap the response read at 1 MiB to bound memory use.
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	if code := r.StatusCode; code < 200 || code > 299 {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
	}

	var token *Token
	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
	switch content {
	case "application/x-www-form-urlencoded", "text/plain":
		// Form-encoded response (older / non-JSON providers).
		vals, err := url.ParseQuery(string(body))
		if err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  vals.Get("access_token"),
			TokenType:    vals.Get("token_type"),
			RefreshToken: vals.Get("refresh_token"),
			Raw:          vals,
		}
		e := vals.Get("expires_in")
		if e == "" {
			// TODO(jbd): Facebook's OAuth2 implementation is broken and
			// returns expires_in field in expires. Remove the fallback to expires,
			// when Facebook fixes their implementation.
			e = vals.Get("expires")
		}
		expires, _ := strconv.Atoi(e)
		if expires != 0 {
			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
		}
	default:
		// Any other content type is assumed to be JSON.
		var tj tokenJSON
		if err = json.Unmarshal(body, &tj); err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  tj.AccessToken,
			TokenType:    tj.TokenType,
			RefreshToken: tj.RefreshToken,
			Expiry:       tj.expiry(),
			Raw:          make(map[string]interface{}),
		}
		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
	}
	// Don't overwrite `RefreshToken` with an empty value
	// if this was a token refreshing request.
	if token.RefreshToken == "" {
		token.RefreshToken = v.Get("refresh_token")
	}
	return token, nil
}
|
||||
28
Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go
generated
vendored
Normal file
28
Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test_providerAuthHeaderWorks checks providerAuthHeaderWorks against the
// brokenAuthHeaderProviders list: every listed URL (and any URL formed by
// appending a suffix to it) must be reported as broken, while a URL not on
// the list must be reported as working.
func Test_providerAuthHeaderWorks(t *testing.T) {
	for _, p := range brokenAuthHeaderProviders {
		if providerAuthHeaderWorks(p) {
			t.Errorf("URL: %s not found in list", p)
		}
		// A URL with extra trailing path under a broken provider must
		// also be treated as broken.
		p := fmt.Sprintf("%ssomesuffix", p)
		if providerAuthHeaderWorks(p) {
			t.Errorf("URL: %s not found in list", p)
		}
	}
	// An unlisted provider should be assumed to support the auth header.
	p := "https://api.not-in-the-list-example.com/"
	if !providerAuthHeaderWorks(p) {
		t.Errorf("URL: %s found in list", p)
	}

}
|
||||
67
Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go
generated
vendored
Normal file
67
Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
var HTTPClient ContextKey
|
||||
|
||||
// ContextKey is just an empty struct. It exists so HTTPClient can be
|
||||
// an immutable public variable with a unique type. It's immutable
|
||||
// because nobody else can create a ContextKey, being unexported.
|
||||
type ContextKey struct{}
|
||||
|
||||
// ContextClientFunc is a func which tries to return an *http.Client
|
||||
// given a Context value. If it returns an error, the search stops
|
||||
// with that error. If it returns (nil, nil), the search continues
|
||||
// down the list of registered funcs.
|
||||
type ContextClientFunc func(context.Context) (*http.Client, error)
|
||||
|
||||
var contextClientFuncs []ContextClientFunc
|
||||
|
||||
// RegisterContextClientFunc adds fn to the list of funcs that ContextClient
// consults (in registration order) when resolving an *http.Client from a
// context. NOTE(review): appends without synchronization — presumably only
// called from init/setup paths; confirm before calling concurrently.
func RegisterContextClientFunc(fn ContextClientFunc) {
	contextClientFuncs = append(contextClientFuncs, fn)
}
|
||||
|
||||
// ContextClient resolves the *http.Client to use for ctx. Registered
// ContextClientFuncs are consulted first, in registration order; the first
// one returning a non-nil client or an error decides the result. Failing
// that, a client stored on the context under the HTTPClient key is used,
// and finally http.DefaultClient.
func ContextClient(ctx context.Context) (*http.Client, error) {
	for _, lookup := range contextClientFuncs {
		client, err := lookup(ctx)
		switch {
		case err != nil:
			return nil, err
		case client != nil:
			return client, nil
		}
	}
	if client, ok := ctx.Value(HTTPClient).(*http.Client); ok {
		return client, nil
	}
	return http.DefaultClient, nil
}
|
||||
|
||||
// ContextTransport returns the http.RoundTripper of the client resolved
// from ctx. If resolution fails (a rare case, e.g. somebody using nil on
// App Engine), the error is surfaced lazily via an ErrorTransport so it is
// reported at RoundTrip time rather than here.
func ContextTransport(ctx context.Context) http.RoundTripper {
	client, err := ContextClient(ctx)
	if err != nil {
		return ErrorTransport{Err: err}
	}
	return client.Transport
}
|
||||
|
||||
// ErrorTransport returns the specified error on RoundTrip.
|
||||
// This RoundTripper should be used in rare error cases where
|
||||
// error handling can be postponed to response handling time.
|
||||
type ErrorTransport struct{ Err error }
|
||||
|
||||
// RoundTrip implements http.RoundTripper by always failing with t.Err;
// the request is never sent.
func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, t.Err
}
|
||||
159
Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
generated
vendored
Normal file
159
Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jws provides encoding and decoding utilities for
|
||||
// signed JWS messages.
|
||||
package jws // import "golang.org/x/oauth2/jws"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ClaimSet contains information about the JWT signature including the
|
||||
// permissions being requested (scopes), the target of the token, the issuer,
|
||||
// the time the token was issued, and the lifetime of the token.
|
||||
type ClaimSet struct {
|
||||
Iss string `json:"iss"` // email address of the client_id of the application making the access token request
|
||||
Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
|
||||
Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
|
||||
Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch)
|
||||
Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch)
|
||||
Typ string `json:"typ,omitempty"` // token type (Optional).
|
||||
|
||||
// Email for which the application is requesting delegated access (Optional).
|
||||
Sub string `json:"sub,omitempty"`
|
||||
|
||||
// The old name of Sub. Client keeps setting Prn to be
|
||||
// complaint with legacy OAuth 2.0 providers. (Optional)
|
||||
Prn string `json:"prn,omitempty"`
|
||||
|
||||
// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
|
||||
// This array is marshalled using custom code (see (c *ClaimSet) encode()).
|
||||
PrivateClaims map[string]interface{} `json:"-"`
|
||||
}
|
||||
|
||||
// encode returns the base64url-encoded JSON serialization of the claim
// set, defaulting Iat/Exp when unset and splicing any PrivateClaims into
// the marshalled JSON by hand (they carry `json:"-"` so Marshal skips them).
// Note: mutates c by filling in Iat/Exp.
func (c *ClaimSet) encode() (string, error) {
	// Reverting time back for machines whose time is not perfectly in sync.
	// If client machine's time is in the future according
	// to Google servers, an access token will not be issued.
	now := time.Now().Add(-10 * time.Second)
	if c.Iat == 0 {
		c.Iat = now.Unix()
	}
	if c.Exp == 0 {
		c.Exp = now.Add(time.Hour).Unix()
	}
	if c.Exp < c.Iat {
		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
	}

	b, err := json.Marshal(c)
	if err != nil {
		return "", err
	}

	// Fast path: no private claims to merge in.
	if len(c.PrivateClaims) == 0 {
		return base64Encode(b), nil
	}

	// Marshal private claim set and then append it to b.
	prv, err := json.Marshal(c.PrivateClaims)
	if err != nil {
		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
	}

	// Concatenate public and private claim JSON objects: both must be
	// objects, so replace the public object's closing brace with a comma
	// and append the private object's contents (sans its opening brace).
	if !bytes.HasSuffix(b, []byte{'}'}) {
		return "", fmt.Errorf("jws: invalid JSON %s", b)
	}
	if !bytes.HasPrefix(prv, []byte{'{'}) {
		return "", fmt.Errorf("jws: invalid JSON %s", prv)
	}
	b[len(b)-1] = ','          // Replace closing curly brace with a comma.
	b = append(b, prv[1:]...)  // Append private claims.
	return base64Encode(b), nil
}
|
||||
|
||||
// Header represents the header for the signed JWS payloads.
|
||||
type Header struct {
|
||||
// The algorithm used for signature.
|
||||
Algorithm string `json:"alg"`
|
||||
|
||||
// Represents the token type.
|
||||
Typ string `json:"typ"`
|
||||
}
|
||||
|
||||
// encode returns the base64url-encoded JSON form of the header.
func (h *Header) encode() (string, error) {
	j, err := json.Marshal(h)
	if err != nil {
		return "", err
	}
	return base64Encode(j), nil
}
|
||||
|
||||
// Decode decodes a claim set from a JWS payload (the second dot-separated
// segment of the token). The signature is NOT verified here.
func Decode(payload string) (*ClaimSet, error) {
	// decode returned id token to get expiry
	parts := strings.Split(payload, ".")
	if len(parts) < 2 {
		// TODO(jbd): Provide more context about the error.
		return nil, errors.New("jws: invalid token received")
	}
	raw, err := base64Decode(parts[1])
	if err != nil {
		return nil, err
	}
	c := &ClaimSet{}
	err = json.NewDecoder(bytes.NewBuffer(raw)).Decode(c)
	return c, err
}
|
||||
|
||||
// Encode encodes a signed JWS with the provided header and claim set.
// The signing input is "base64url(header).base64url(claims)", signed with
// RSASSA-PKCS1-v1_5 over SHA-256 using the given private key, and the
// result is "signingInput.base64url(signature)".
func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
	head, err := header.encode()
	if err != nil {
		return "", err
	}
	claims, err := c.encode()
	if err != nil {
		return "", err
	}
	signingInput := fmt.Sprintf("%s.%s", head, claims)
	digest := sha256.New()
	digest.Write([]byte(signingInput))
	sig, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, digest.Sum(nil))
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s.%s", signingInput, base64Encode(sig)), nil
}
|
||||
|
||||
// base64Encode returns the base64url encoding of b with any trailing
// "=" padding stripped (the unpadded form used by JWS).
func base64Encode(b []byte) string {
	encoded := base64.URLEncoding.EncodeToString(b)
	return strings.TrimRight(encoded, "=")
}
|
||||
|
||||
// base64Decode decodes the base64url-encoded string s, restoring any
// stripped "=" padding before decoding.
func base64Decode(s string) ([]byte, error) {
	// add back missing padding
	if rem := len(s) % 4; rem == 2 {
		s += "=="
	} else if rem == 3 {
		s += "="
	}
	return base64.URLEncoding.DecodeString(s)
}
|
||||
31
Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
generated
vendored
Normal file
31
Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jwt_test
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
)
|
||||
|
||||
// ExampleJWTConfig demonstrates configuring the two-legged JWT flow and
// obtaining an authenticated *http.Client from it.
func ExampleJWTConfig() {
	conf := &jwt.Config{
		Email: "xxx@developer.com",
		// The contents of your RSA private key or your PEM file
		// that contains a private key.
		// If you have a p12 file instead, you
		// can use `openssl` to export the private key into a pem file.
		//
		//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
		//
		// It only supports PEM containers with no passphrase.
		PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
		Subject:    "user@example.com",
		TokenURL:   "https://provider.com/o/oauth2/token",
	}
	// Initiate an http.Client, the following GET request will be
	// authorized and authenticated on the behalf of user@example.com.
	client := conf.Client(oauth2.NoContext)
	client.Get("...")
}
|
||||
153
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
generated
vendored
Normal file
153
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
|
||||
// known as "two-legged OAuth 2.0".
|
||||
//
|
||||
// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/internal"
|
||||
"golang.org/x/oauth2/jws"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
|
||||
defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
|
||||
)
|
||||
|
||||
// Config is the configuration for using JWT to fetch tokens,
|
||||
// commonly known as "two-legged OAuth 2.0".
|
||||
type Config struct {
|
||||
// Email is the OAuth client identifier used when communicating with
|
||||
// the configured OAuth provider.
|
||||
Email string
|
||||
|
||||
// PrivateKey contains the contents of an RSA private key or the
|
||||
// contents of a PEM file that contains a private key. The provided
|
||||
// private key is used to sign JWT payloads.
|
||||
// PEM containers with a passphrase are not supported.
|
||||
// Use the following command to convert a PKCS 12 file into a PEM.
|
||||
//
|
||||
// $ openssl pkcs12 -in key.p12 -out key.pem -nodes
|
||||
//
|
||||
PrivateKey []byte
|
||||
|
||||
// Subject is the optional user to impersonate.
|
||||
Subject string
|
||||
|
||||
// Scopes optionally specifies a list of requested permission scopes.
|
||||
Scopes []string
|
||||
|
||||
// TokenURL is the endpoint required to complete the 2-legged JWT flow.
|
||||
TokenURL string
|
||||
|
||||
// Expires optionally specifies how long the token is valid for.
|
||||
Expires time.Duration
|
||||
}
|
||||
|
||||
// TokenSource returns a JWT TokenSource using the configuration
// in c and the HTTP client from the provided context.
// The jwtSource is wrapped in a ReuseTokenSource, so a new assertion is
// only signed and exchanged when the cached token has expired.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
}
|
||||
|
||||
// Client returns an HTTP client wrapping the context's
// HTTP transport and adding Authorization headers with tokens
// obtained from c.
//
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
	// Each request transparently attaches a token from c.TokenSource.
	return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
|
||||
|
||||
// jwtSource is a source that always does a signed JWT request for a token.
|
||||
// It should typically be wrapped with a reuseTokenSource.
|
||||
type jwtSource struct {
|
||||
ctx context.Context
|
||||
conf *Config
|
||||
}
|
||||
|
||||
// Token implements oauth2.TokenSource: it builds a JWT claim set from the
// Config, signs it with the configured RSA private key, POSTs the assertion
// to the token endpoint, and parses the JSON response into an *oauth2.Token.
func (js jwtSource) Token() (*oauth2.Token, error) {
	pk, err := internal.ParseKey(js.conf.PrivateKey)
	if err != nil {
		return nil, err
	}
	hc := oauth2.NewClient(js.ctx, nil)
	claimSet := &jws.ClaimSet{
		Iss:   js.conf.Email,
		Scope: strings.Join(js.conf.Scopes, " "),
		Aud:   js.conf.TokenURL,
	}
	if subject := js.conf.Subject; subject != "" {
		claimSet.Sub = subject
		// prn is the old name of sub. Keep setting it
		// to be compatible with legacy OAuth 2.0 providers.
		claimSet.Prn = subject
	}
	if t := js.conf.Expires; t > 0 {
		claimSet.Exp = time.Now().Add(t).Unix()
	}
	payload, err := jws.Encode(defaultHeader, claimSet, pk)
	if err != nil {
		return nil, err
	}
	v := url.Values{}
	v.Set("grant_type", defaultGrantType)
	v.Set("assertion", payload)
	resp, err := hc.PostForm(js.conf.TokenURL, v)
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	defer resp.Body.Close()
	// Cap the response read at 1 MiB to bound memory use.
	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	if c := resp.StatusCode; c < 200 || c > 299 {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
	}
	// tokenRes is the JSON response body.
	var tokenRes struct {
		AccessToken string `json:"access_token"`
		TokenType   string `json:"token_type"`
		IDToken     string `json:"id_token"`
		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
	}
	if err := json.Unmarshal(body, &tokenRes); err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	token := &oauth2.Token{
		AccessToken: tokenRes.AccessToken,
		TokenType:   tokenRes.TokenType,
	}
	// Preserve all response fields (e.g. "scope") as extras.
	raw := make(map[string]interface{})
	json.Unmarshal(body, &raw) // no error checks for optional fields
	token = token.WithExtra(raw)

	if secs := tokenRes.ExpiresIn; secs > 0 {
		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
	}
	if v := tokenRes.IDToken; v != "" {
		// decode returned id token to get expiry; the id_token's exp
		// claim overrides expires_in when both are present.
		claimSet, err := jws.Decode(v)
		if err != nil {
			return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
		}
		token.Expiry = time.Unix(claimSet.Exp, 0)
	}
	return token, nil
}
|
||||
134
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
generated
vendored
Normal file
134
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
generated
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
|
||||
DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
|
||||
fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
|
||||
1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
|
||||
k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
|
||||
/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
|
||||
3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
|
||||
2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
|
||||
nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
|
||||
6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
|
||||
5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
|
||||
DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
|
||||
M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
|
||||
z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
|
||||
1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
|
||||
J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
|
||||
f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
|
||||
QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
|
||||
cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
|
||||
Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
|
||||
5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
|
||||
KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
|
||||
OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
|
||||
mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
|
||||
5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
|
||||
-----END RSA PRIVATE KEY-----`)
|
||||
|
||||
// TestJWTFetch_JSONResponse verifies that a well-formed JSON token response
// is parsed into a valid token, including expiry and extra ("scope") fields.
func TestJWTFetch_JSONResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{
			"access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
			"scope": "user",
			"token_type": "bearer",
			"expires_in": 3600
		}`))
	}))
	defer ts.Close()

	conf := &Config{
		Email:      "aaa@xxx.com",
		PrivateKey: dummyPrivateKey,
		TokenURL:   ts.URL,
	}
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		t.Fatal(err)
	}
	if !tok.Valid() {
		t.Errorf("Token invalid")
	}
	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
		t.Errorf("Unexpected access token, %#v", tok.AccessToken)
	}
	if tok.TokenType != "bearer" {
		t.Errorf("Unexpected token type, %#v", tok.TokenType)
	}
	if tok.Expiry.IsZero() {
		t.Errorf("Unexpected token expiry, %#v", tok.Expiry)
	}
	scope := tok.Extra("scope")
	if scope != "user" {
		t.Errorf("Unexpected value for scope: %v", scope)
	}
}
|
||||
|
||||
// TestJWTFetch_BadResponse verifies that a response missing access_token
// still yields a (non-nil, invalid) token carrying the other fields.
func TestJWTFetch_BadResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
	}))
	defer ts.Close()

	conf := &Config{
		Email:      "aaa@xxx.com",
		PrivateKey: dummyPrivateKey,
		TokenURL:   ts.URL,
	}
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		t.Fatal(err)
	}
	if tok == nil {
		t.Fatalf("token is nil")
	}
	if tok.Valid() {
		t.Errorf("token is valid. want invalid.")
	}
	if tok.AccessToken != "" {
		t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
	}
	if want := "bearer"; tok.TokenType != want {
		t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
	}
	scope := tok.Extra("scope")
	if want := "user"; scope != want {
		t.Errorf("token scope = %q; want %q", scope, want)
	}
}
|
||||
|
||||
// TestJWTFetch_BadResponseType verifies that a response whose access_token
// has the wrong JSON type (number instead of string) produces an error.
func TestJWTFetch_BadResponseType(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
	}))
	defer ts.Close()
	conf := &Config{
		Email:      "aaa@xxx.com",
		PrivateKey: dummyPrivateKey,
		TokenURL:   ts.URL,
	}
	tok, err := conf.TokenSource(oauth2.NoContext).Token()
	if err == nil {
		t.Error("got a token; expected error")
		if tok.AccessToken != "" {
			t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
		}
	}
}
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package linkedin provides constants for using OAuth2 to access LinkedIn.
|
||||
package linkedin // import "golang.org/x/oauth2/linkedin"
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Endpoint is LinkedIn's OAuth 2.0 endpoint (authorization and
// access-token URLs), for use as Config.Endpoint.
var Endpoint = oauth2.Endpoint{
	AuthURL:  "https://www.linkedin.com/uas/oauth2/authorization",
	TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
}
|
||||
325
Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
generated
vendored
Normal file
325
Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package oauth2 provides support for making
|
||||
// OAuth2 authorized and authenticated HTTP requests.
|
||||
// It can additionally grant authorization with Bearer JWT.
|
||||
package oauth2 // import "golang.org/x/oauth2"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// NoContext is the default context you should supply if not using
|
||||
// your own context.Context (see https://golang.org/x/net/context).
|
||||
var NoContext = context.TODO()
|
||||
|
||||
// Config describes a typical 3-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
||||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// Endpoint contains the resource server's token endpoint
|
||||
// URLs. These are constants specific to each server and are
|
||||
// often available via site-specific packages, such as
|
||||
// google.Endpoint or github.Endpoint.
|
||||
Endpoint Endpoint
|
||||
|
||||
// RedirectURL is the URL to redirect users going through
|
||||
// the OAuth flow, after the resource owner's URLs.
|
||||
RedirectURL string
|
||||
|
||||
// Scope specifies optional requested permissions.
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// A TokenSource is anything that can return a token.
|
||||
type TokenSource interface {
|
||||
// Token returns a token or an error.
|
||||
// Token must be safe for concurrent use by multiple goroutines.
|
||||
// The returned Token must not be modified.
|
||||
Token() (*Token, error)
|
||||
}
|
||||
|
||||
// Endpoint contains the OAuth 2.0 provider's authorization and token
|
||||
// endpoint URLs.
|
||||
type Endpoint struct {
|
||||
AuthURL string
|
||||
TokenURL string
|
||||
}
|
||||
|
||||
var (
|
||||
// AccessTypeOnline and AccessTypeOffline are options passed
|
||||
// to the Options.AuthCodeURL method. They modify the
|
||||
// "access_type" field that gets sent in the URL returned by
|
||||
// AuthCodeURL.
|
||||
//
|
||||
// Online is the default if neither is specified. If your
|
||||
// application needs to refresh access tokens when the user
|
||||
// is not present at the browser, then use offline. This will
|
||||
// result in your application obtaining a refresh token the
|
||||
// first time your application exchanges an authorization
|
||||
// code for a user.
|
||||
AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
|
||||
AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
|
||||
|
||||
// ApprovalForce forces the users to view the consent dialog
|
||||
// and confirm the permissions request at the URL returned
|
||||
// from AuthCodeURL, even if they've already done so.
|
||||
ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
|
||||
)
|
||||
|
||||
// An AuthCodeOption is passed to Config.AuthCodeURL.
|
||||
type AuthCodeOption interface {
|
||||
setValue(url.Values)
|
||||
}
|
||||
|
||||
// setParam is an AuthCodeOption that sets one key/value query parameter
// on the authorization URL.
type setParam struct{ k, v string }

// setValue implements AuthCodeOption.
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }

// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
// to a provider's authorization endpoint.
func SetAuthURLParam(key, value string) AuthCodeOption {
	return setParam{key, value}
}
|
||||
|
||||
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-zero string and validate that it matches the
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
	v := url.Values{
		"response_type": {"code"},
		"client_id":     {c.ClientID},
		"redirect_uri":  internal.CondVal(c.RedirectURL),
		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
		"state":         internal.CondVal(state),
	}
	for _, opt := range opts {
		opt.setValue(v)
	}
	var buf bytes.Buffer
	buf.WriteString(c.Endpoint.AuthURL)
	// Append with '&' when the endpoint URL already carries a query string.
	if strings.Contains(c.Endpoint.AuthURL, "?") {
		buf.WriteByte('&')
	} else {
		buf.WriteByte('?')
	}
	buf.WriteString(v.Encode())
	return buf.String()
}
|
||||
|
||||
// PasswordCredentialsToken converts a resource owner username and password
|
||||
// pair into a token.
|
||||
//
|
||||
// Per the RFC, this grant type should only be used "when there is a high
|
||||
// degree of trust between the resource owner and the client (e.g., the client
|
||||
// is part of the device operating system or a highly privileged application),
|
||||
// and when other authorization grant types are not available."
|
||||
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
|
||||
//
|
||||
// The HTTP client to use is derived from the context.
|
||||
// If nil, http.DefaultClient is used.
|
||||
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
	// Resource Owner Password Credentials grant (RFC 6749 section 4.3).
	return retrieveToken(ctx, c, url.Values{
		"grant_type": {"password"},
		"username":   {username},
		"password":   {password},
		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
	})
}
|
||||
|
||||
// Exchange converts an authorization code into a token.
|
||||
//
|
||||
// It is used after a resource provider redirects the user back
|
||||
// to the Redirect URI (the URL obtained from AuthCodeURL).
|
||||
//
|
||||
// The HTTP client to use is derived from the context.
|
||||
// If a client is not provided via the context, http.DefaultClient is used.
|
||||
//
|
||||
// The code will be in the *http.Request.FormValue("code"). Before
|
||||
// calling Exchange, be sure to validate FormValue("state").
|
||||
func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
	// Authorization Code grant (RFC 6749 section 4.1.3).
	return retrieveToken(ctx, c, url.Values{
		"grant_type":   {"authorization_code"},
		"code":         {code},
		"redirect_uri": internal.CondVal(c.RedirectURL),
		"scope":        internal.CondVal(strings.Join(c.Scopes, " ")),
	})
}
|
||||
|
||||
// Client returns an HTTP client using the provided token.
|
||||
// The token will auto-refresh as necessary. The underlying
|
||||
// HTTP transport will be obtained using the provided context.
|
||||
// The returned client and its Transport should not be modified.
|
||||
func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
|
||||
return NewClient(ctx, c.TokenSource(ctx, t))
|
||||
}
|
||||
|
||||
// TokenSource returns a TokenSource that returns t until t expires,
|
||||
// automatically refreshing it as necessary using the provided context.
|
||||
//
|
||||
// Most users will use Config.Client instead.
|
||||
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
	tkr := &tokenRefresher{
		ctx:  ctx,
		conf: c,
	}
	if t != nil {
		// Seed the refresher with the current refresh token so it can
		// renew t once it expires.
		tkr.refreshToken = t.RefreshToken
	}
	// reuseTokenSource serves t until invalid, then delegates to tkr.
	return &reuseTokenSource{
		t:   t,
		new: tkr,
	}
}
|
||||
|
||||
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
|
||||
// HTTP requests to renew a token using a RefreshToken.
|
||||
type tokenRefresher struct {
|
||||
ctx context.Context // used to get HTTP requests
|
||||
conf *Config
|
||||
refreshToken string
|
||||
}
|
||||
|
||||
// Token renews the access token with a "grant_type=refresh_token" request
// using the stored refresh token.
//
// WARNING: Token is not safe for concurrent access, as it
// updates the tokenRefresher's refreshToken field.
// Within this package, it is used by reuseTokenSource which
// synchronizes calls to this method with its own mutex.
func (tf *tokenRefresher) Token() (*Token, error) {
	if tf.refreshToken == "" {
		return nil, errors.New("oauth2: token expired and refresh token is not set")
	}

	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {tf.refreshToken},
	})

	if err != nil {
		return nil, err
	}
	// The server may rotate the refresh token; remember the new one.
	if tf.refreshToken != tk.RefreshToken {
		tf.refreshToken = tk.RefreshToken
	}
	return tk, err
}
|
||||
|
||||
// reuseTokenSource is a TokenSource that holds a single token in memory
// and validates its expiry before each call to retrieve it with
// Token. If it's expired, it will be auto-refreshed using the
// new TokenSource.
type reuseTokenSource struct {
	new TokenSource // called when t is expired.

	mu sync.Mutex // guards t
	t  *Token     // current cached token; replaced when it becomes invalid
}
|
||||
|
||||
// Token returns the current token if it's still valid, else will
|
||||
// refresh the current token (using r.Context for HTTP client
|
||||
// information) and return the new one.
|
||||
func (s *reuseTokenSource) Token() (*Token, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.t.Valid() {
|
||||
return s.t, nil
|
||||
}
|
||||
t, err := s.new.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.t = t
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// StaticTokenSource returns a TokenSource that always returns the same token.
|
||||
// Because the provided token t is never refreshed, StaticTokenSource is only
|
||||
// useful for tokens that never expire.
|
||||
func StaticTokenSource(t *Token) TokenSource {
|
||||
return staticTokenSource{t}
|
||||
}
|
||||
|
||||
// staticTokenSource is a TokenSource that always returns the same Token.
type staticTokenSource struct {
	t *Token // the fixed token; never refreshed
}

// Token implements TokenSource by returning the stored token unconditionally.
func (s staticTokenSource) Token() (*Token, error) {
	return s.t, nil
}
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
// Used by NewClient (via the internal package) to pick up a custom client.
var HTTPClient internal.ContextKey
|
||||
|
||||
// NewClient creates an *http.Client from a Context and TokenSource.
|
||||
// The returned client is not valid beyond the lifetime of the context.
|
||||
//
|
||||
// As a special case, if src is nil, a non-OAuth2 client is returned
|
||||
// using the provided context. This exists to support related OAuth2
|
||||
// packages.
|
||||
func NewClient(ctx context.Context, src TokenSource) *http.Client {
|
||||
if src == nil {
|
||||
c, err := internal.ContextClient(ctx)
|
||||
if err != nil {
|
||||
return &http.Client{Transport: internal.ErrorTransport{err}}
|
||||
}
|
||||
return c
|
||||
}
|
||||
return &http.Client{
|
||||
Transport: &Transport{
|
||||
Base: internal.ContextTransport(ctx),
|
||||
Source: ReuseTokenSource(nil, src),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ReuseTokenSource returns a TokenSource which repeatedly returns the
|
||||
// same token as long as it's valid, starting with t.
|
||||
// When its cached token is invalid, a new token is obtained from src.
|
||||
//
|
||||
// ReuseTokenSource is typically used to reuse tokens from a cache
|
||||
// (such as a file on disk) between runs of a program, rather than
|
||||
// obtaining new tokens unnecessarily.
|
||||
//
|
||||
// The initial token t may be nil, in which case the TokenSource is
|
||||
// wrapped in a caching version if it isn't one already. This also
|
||||
// means it's always safe to wrap ReuseTokenSource around any other
|
||||
// TokenSource without adverse effects.
|
||||
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
||||
// Don't wrap a reuseTokenSource in itself. That would work,
|
||||
// but cause an unnecessary number of mutex operations.
|
||||
// Just build the equivalent one.
|
||||
if rt, ok := src.(*reuseTokenSource); ok {
|
||||
if t == nil {
|
||||
// Just use it directly.
|
||||
return rt
|
||||
}
|
||||
src = rt.new
|
||||
}
|
||||
return &reuseTokenSource{
|
||||
t: t,
|
||||
new: src,
|
||||
}
|
||||
}
|
||||
471
Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
generated
vendored
Normal file
471
Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
generated
vendored
Normal file
@@ -0,0 +1,471 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// mockTransport is an http.RoundTripper test double that delegates
// each RoundTrip call to the rt func supplied by the test.
type mockTransport struct {
	rt func(req *http.Request) (resp *http.Response, err error)
}

// RoundTrip implements http.RoundTripper by invoking the injected rt func.
func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	return t.rt(req)
}
|
||||
|
||||
// mockCache is a token-cache test double: ReadToken returns the canned
// token/readErr pair and WriteToken discards its argument.
// NOTE(review): not referenced by the tests visible in this chunk.
type mockCache struct {
	token   *Token
	readErr error
}

// ReadToken returns the fixed token and error configured on the cache.
func (c *mockCache) ReadToken() (*Token, error) {
	return c.token, c.readErr
}

// WriteToken intentionally discards the token.
func (c *mockCache) WriteToken(*Token) {
	// do nothing
}
|
||||
|
||||
// newConf returns a Config pointing at the given test-server URL, with
// fixed client credentials, redirect URL, and two scopes. The endpoint
// paths ("/auth", "/token") match the handlers in the tests below.
func newConf(url string) *Config {
	return &Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		RedirectURL:  "REDIRECT_URL",
		Scopes:       []string{"scope1", "scope2"},
		Endpoint: Endpoint{
			AuthURL:  url + "/auth",
			TokenURL: url + "/token",
		},
	}
}
|
||||
|
||||
// TestAuthCodeURL verifies that AuthCodeURL encodes state, scopes, and the
// AccessTypeOffline/ApprovalForce options into the expected query string.
func TestAuthCodeURL(t *testing.T) {
	conf := newConf("server")
	url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
	if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" {
		t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
	}
}
|
||||
|
||||
// TestAuthCodeURL_CustomParam verifies that a SetAuthURLParam option adds
// an arbitrary key/value pair to the authorization URL.
func TestAuthCodeURL_CustomParam(t *testing.T) {
	conf := newConf("server")
	param := SetAuthURLParam("foo", "bar")
	url := conf.AuthCodeURL("baz", param)
	if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" {
		t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
	}
}
|
||||
|
||||
// TestAuthCodeURL_Optional verifies that optional fields (secret, redirect,
// scopes, state) are omitted from the URL when left unset.
func TestAuthCodeURL_Optional(t *testing.T) {
	conf := &Config{
		ClientID: "CLIENT_ID",
		Endpoint: Endpoint{
			AuthURL:  "/auth-url",
			TokenURL: "/token-url",
		},
	}
	url := conf.AuthCodeURL("")
	if url != "/auth-url?client_id=CLIENT_ID&response_type=code" {
		t.Fatalf("Auth code URL doesn't match the expected, found: %v", url)
	}
}
|
||||
|
||||
// TestExchangeRequest verifies the authorization_code exchange: the request
// uses HTTP basic auth and a form-encoded body, and a form-encoded response
// is parsed into a valid token with extra fields preserved.
func TestExchangeRequest(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/token" {
			t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
		}
		// "Basic " + base64("CLIENT_ID:CLIENT_SECRET")
		headerAuth := r.Header.Get("Authorization")
		if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
			t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
		}
		headerContentType := r.Header.Get("Content-Type")
		if headerContentType != "application/x-www-form-urlencoded" {
			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
		}
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("Failed reading request body: %s.", err)
		}
		if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
			t.Errorf("Unexpected exchange payload, %v is found.", string(body))
		}
		// Respond with a form-encoded (non-JSON) token payload.
		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	tok, err := conf.Exchange(NoContext, "exchange-code")
	if err != nil {
		t.Error(err)
	}
	if !tok.Valid() {
		t.Fatalf("Token invalid. Got: %#v", tok)
	}
	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
	}
	if tok.TokenType != "bearer" {
		t.Errorf("Unexpected token type, %#v.", tok.TokenType)
	}
	scope := tok.Extra("scope")
	if scope != "user" {
		t.Errorf("Unexpected value for scope: %v", scope)
	}
}
|
||||
|
||||
// TestExchangeRequest_JSONResponse mirrors TestExchangeRequest but has the
// server reply with a JSON token payload; also checks that numeric extra
// fields (expires_in) decode as float64.
func TestExchangeRequest_JSONResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() != "/token" {
			t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
		}
		headerAuth := r.Header.Get("Authorization")
		if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
			t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
		}
		headerContentType := r.Header.Get("Content-Type")
		if headerContentType != "application/x-www-form-urlencoded" {
			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
		}
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("Failed reading request body: %s.", err)
		}
		if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
			t.Errorf("Unexpected exchange payload, %v is found.", string(body))
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	tok, err := conf.Exchange(NoContext, "exchange-code")
	if err != nil {
		t.Error(err)
	}
	if !tok.Valid() {
		t.Fatalf("Token invalid. Got: %#v", tok)
	}
	if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
	}
	if tok.TokenType != "bearer" {
		t.Errorf("Unexpected token type, %#v.", tok.TokenType)
	}
	scope := tok.Extra("scope")
	if scope != "user" {
		t.Errorf("Unexpected value for scope: %v", scope)
	}
	// JSON numbers decode to float64 via encoding/json.
	expiresIn := tok.Extra("expires_in")
	if expiresIn != float64(86400) {
		t.Errorf("Unexpected non-numeric value for expires_in: %v", expiresIn)
	}
}
|
||||
|
||||
// TestExtraValueRetrieval verifies Token.Extra's type-sniffing for
// url.Values-backed raw data: dotted numbers parse as float64, plain
// integers as int64, and everything else stays a string (untrimmed).
func TestExtraValueRetrieval(t *testing.T) {
	values := url.Values{}

	kvmap := map[string]string{
		"scope": "user", "token_type": "bearer", "expires_in": "86400.92",
		"server_time": "1443571905.5606415", "referer_ip": "10.0.0.1",
		"etag": "\"afZYj912P4alikMz_P11982\"", "request_id": "86400",
		"untrimmed": " untrimmed ",
	}

	for key, value := range kvmap {
		values.Set(key, value)
	}

	tok := Token{
		raw: values,
	}

	scope := tok.Extra("scope")
	if scope != "user" {
		t.Errorf("Unexpected scope %v wanted \"user\"", scope)
	}
	serverTime := tok.Extra("server_time")
	if serverTime != 1443571905.5606415 {
		t.Errorf("Unexpected non-float64 value for server_time: %v", serverTime)
	}
	refererIp := tok.Extra("referer_ip")
	if refererIp != "10.0.0.1" {
		t.Errorf("Unexpected non-string value for referer_ip: %v", refererIp)
	}
	expires_in := tok.Extra("expires_in")
	if expires_in != 86400.92 {
		t.Errorf("Unexpected value for expires_in, wanted 86400 got %v", expires_in)
	}
	requestId := tok.Extra("request_id")
	if requestId != int64(86400) {
		t.Errorf("Unexpected non-int64 value for request_id: %v", requestId)
	}
	// Values that fail numeric parsing are returned without trimming.
	untrimmed := tok.Extra("untrimmed")
	if untrimmed != " untrimmed " {
		t.Errorf("Unexpected value for untrimmed, got %q expected \" untrimmed \"", untrimmed)
	}
}
|
||||
|
||||
// day is the token lifetime used by the expiry tests below.
const day = 24 * time.Hour
|
||||
|
||||
// TestExchangeRequest_JSONResponse_Expiry runs the expiry sub-test over the
// known provider variants: numeric "expires_in", string "expires_in"
// (PayPal), "expires" (Facebook), and malformed values that must error.
func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
	seconds := int32(day.Seconds())
	jsonNumberType := reflect.TypeOf(json.Number("0"))
	for _, c := range []struct {
		expires string
		expect  error
	}{
		{fmt.Sprintf(`"expires_in": %d`, seconds), nil},
		{fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case
		{fmt.Sprintf(`"expires": %d`, seconds), nil},      // Facebook case
		{`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}},                               // wrong type
		{`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}},                                // wrong type
		{`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}},                     // wrong value
	} {
		testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect)
	}
}
|
||||
|
||||
func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
|
||||
}))
|
||||
defer ts.Close()
|
||||
conf := newConf(ts.URL)
|
||||
t1 := time.Now().Add(day)
|
||||
tok, err := conf.Exchange(NoContext, "exchange-code")
|
||||
t2 := time.Now().Add(day)
|
||||
// Do a fmt.Sprint comparison so either side can be
|
||||
// nil. fmt.Sprint just stringifies them to "<nil>", and no
|
||||
// non-nil expected error ever stringifies as "<nil>", so this
|
||||
// isn't terribly disgusting. We do this because Go 1.4 and
|
||||
// Go 1.5 return a different deep value for
|
||||
// json.UnmarshalTypeError. In Go 1.5, the
|
||||
// json.UnmarshalTypeError contains a new field with a new
|
||||
// non-zero value. Rather than ignore it here with reflect or
|
||||
// add new files and +build tags, just look at the strings.
|
||||
if fmt.Sprint(err) != fmt.Sprint(expect) {
|
||||
t.Errorf("Error = %v; want %v", err, expect)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !tok.Valid() {
|
||||
t.Fatalf("Token invalid. Got: %#v", tok)
|
||||
}
|
||||
expiry := tok.Expiry
|
||||
if expiry.Before(t1) || expiry.After(t2) {
|
||||
t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestExchangeRequest_BadResponse verifies that a token response lacking an
// access_token yields an empty AccessToken without an exchange error.
func TestExchangeRequest_BadResponse(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	tok, err := conf.Exchange(NoContext, "code")
	if err != nil {
		t.Fatal(err)
	}
	if tok.AccessToken != "" {
		t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
	}
}
|
||||
|
||||
// TestExchangeRequest_BadResponseType verifies that a non-string
// access_token in the JSON response surfaces as an exchange error.
func TestExchangeRequest_BadResponseType(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	_, err := conf.Exchange(NoContext, "exchange-code")
	if err == nil {
		t.Error("expected error from invalid access_token type")
	}
}
|
||||
|
||||
// TestExchangeRequest_NonBasicAuth verifies that no Authorization header is
// sent when the endpoint is one that doesn't use HTTP basic auth.
// The mock transport fails the request; only the header check matters.
func TestExchangeRequest_NonBasicAuth(t *testing.T) {
	tr := &mockTransport{
		rt: func(r *http.Request) (w *http.Response, err error) {
			headerAuth := r.Header.Get("Authorization")
			if headerAuth != "" {
				t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
			}
			return nil, errors.New("no response")
		},
	}
	c := &http.Client{Transport: tr}
	conf := &Config{
		ClientID: "CLIENT_ID",
		Endpoint: Endpoint{
			AuthURL:  "https://accounts.google.com/auth",
			TokenURL: "https://accounts.google.com/token",
		},
	}

	// Inject the mock client via the HTTPClient context key.
	ctx := context.WithValue(context.Background(), HTTPClient, c)
	conf.Exchange(ctx, "code")
}
|
||||
|
||||
// TestPasswordCredentialsTokenRequest verifies the resource-owner password
// grant: request URL, basic-auth header, form body, and the parsed token.
func TestPasswordCredentialsTokenRequest(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer r.Body.Close()
		expected := "/token"
		if r.URL.String() != expected {
			t.Errorf("URL = %q; want %q", r.URL, expected)
		}
		headerAuth := r.Header.Get("Authorization")
		expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
		if headerAuth != expected {
			t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
		}
		headerContentType := r.Header.Get("Content-Type")
		expected = "application/x-www-form-urlencoded"
		if headerContentType != expected {
			t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
		}
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("Failed reading request body: %s.", err)
		}
		expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1"
		if string(body) != expected {
			t.Errorf("res.Body = %q; want %q", string(body), expected)
		}
		w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
		w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1")
	if err != nil {
		t.Error(err)
	}
	if !tok.Valid() {
		t.Fatalf("Token invalid. Got: %#v", tok)
	}
	expected := "90d64460d14870c08c81352a05dedd3465940a7c"
	if tok.AccessToken != expected {
		t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected)
	}
	expected = "bearer"
	if tok.TokenType != expected {
		t.Errorf("TokenType = %q; want %q", tok.TokenType, expected)
	}
}
|
||||
|
||||
// TestTokenRefreshRequest verifies that a client built with only a refresh
// token issues a refresh_token grant (correct URL, header, and body) before
// serving the outgoing request.
func TestTokenRefreshRequest(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The actual resource fetch; only the /token call is inspected.
		if r.URL.String() == "/somethingelse" {
			return
		}
		if r.URL.String() != "/token" {
			t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
		}
		headerContentType := r.Header.Get("Content-Type")
		if headerContentType != "application/x-www-form-urlencoded" {
			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
		}
		body, _ := ioutil.ReadAll(r.Body)
		if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
			t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
		}
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"})
	c.Get(ts.URL + "/somethingelse")
}
|
||||
|
||||
// TestFetchWithNoRefreshToken verifies that a request through a client with
// neither an access token nor a refresh token fails.
func TestFetchWithNoRefreshToken(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.String() == "/somethingelse" {
			return
		}
		if r.URL.String() != "/token" {
			t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
		}
		headerContentType := r.Header.Get("Content-Type")
		if headerContentType != "application/x-www-form-urlencoded" {
			t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
		}
		body, _ := ioutil.ReadAll(r.Body)
		if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
			t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
		}
	}))
	defer ts.Close()
	conf := newConf(ts.URL)
	c := conf.Client(NoContext, nil)
	_, err := c.Get(ts.URL + "/somethingelse")
	if err == nil {
		t.Errorf("Fetch should return an error if no refresh token is set")
	}
}
|
||||
|
||||
func TestRefreshToken_RefreshTokenReplacement(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`))
|
||||
return
|
||||
}))
|
||||
defer ts.Close()
|
||||
conf := newConf(ts.URL)
|
||||
tkr := tokenRefresher{
|
||||
conf: conf,
|
||||
ctx: NoContext,
|
||||
refreshToken: "OLD REFRESH TOKEN",
|
||||
}
|
||||
tk, err := tkr.Token()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected refreshToken error returned: %v", err)
|
||||
return
|
||||
}
|
||||
if tk.RefreshToken != tkr.refreshToken {
|
||||
t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigClientWithToken(t *testing.T) {
|
||||
tok := &Token{
|
||||
AccessToken: "abc123",
|
||||
}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want {
|
||||
t.Errorf("Authorization header = %q; want %q", got, want)
|
||||
}
|
||||
return
|
||||
}))
|
||||
defer ts.Close()
|
||||
conf := newConf(ts.URL)
|
||||
|
||||
c := conf.Client(NoContext, tok)
|
||||
req, err := http.NewRequest("GET", ts.URL, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
_, err = c.Do(req)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
package odnoklassniki // import "golang.org/x/oauth2/odnoklassniki"

import (
	"golang.org/x/oauth2"
)

// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
	AuthURL:  "https://www.odnoklassniki.ru/oauth/authorize",
	TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
}
|
||||
22
Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
generated
vendored
Normal file
22
Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package paypal provides constants for using OAuth2 to access PayPal.
package paypal // import "golang.org/x/oauth2/paypal"

import (
	"golang.org/x/oauth2"
)

// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
var Endpoint = oauth2.Endpoint{
	AuthURL:  "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
	TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
}

// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
var SandboxEndpoint = oauth2.Endpoint{
	AuthURL:  "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
	TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
}
|
||||
158
Godeps/_workspace/src/golang.org/x/oauth2/token.go
generated
vendored
Normal file
158
Godeps/_workspace/src/golang.org/x/oauth2/token.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2/internal"
|
||||
)
|
||||
|
||||
// expiryDelta determines how earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
|
||||
|
||||
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// Most users of this package should not access fields of Token
// directly. They're exported mostly for use by related packages
// implementing derivative OAuth2 flows.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string `json:"access_token"`

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string `json:"token_type,omitempty"`

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string `json:"refresh_token,omitempty"`

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time `json:"expiry,omitempty"`

	// raw optionally contains extra metadata from the server
	// when updating a token. Read back via the Extra method.
	raw interface{}
}
|
||||
|
||||
// Type returns t.TokenType if non-empty, else "Bearer".
|
||||
func (t *Token) Type() string {
|
||||
if strings.EqualFold(t.TokenType, "bearer") {
|
||||
return "Bearer"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "mac") {
|
||||
return "MAC"
|
||||
}
|
||||
if strings.EqualFold(t.TokenType, "basic") {
|
||||
return "Basic"
|
||||
}
|
||||
if t.TokenType != "" {
|
||||
return t.TokenType
|
||||
}
|
||||
return "Bearer"
|
||||
}
|
||||
|
||||
// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
// This method is unnecessary when using Transport or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
	// e.g. "Bearer <access token>"
	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}
|
||||
|
||||
// WithExtra returns a new Token that's a clone of t, but using the
|
||||
// provided raw extra map. This is only intended for use by packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
func (t *Token) WithExtra(extra interface{}) *Token {
|
||||
t2 := new(Token)
|
||||
*t2 = *t
|
||||
t2.raw = extra
|
||||
return t2
|
||||
}
|
||||
|
||||
// Extra returns an extra field.
|
||||
// Extra fields are key-value pairs returned by the server as a
|
||||
// part of the token retrieval response.
|
||||
func (t *Token) Extra(key string) interface{} {
|
||||
if raw, ok := t.raw.(map[string]interface{}); ok {
|
||||
return raw[key]
|
||||
}
|
||||
|
||||
vals, ok := t.raw.(url.Values)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
v := vals.Get(key)
|
||||
switch s := strings.TrimSpace(v); strings.Count(s, ".") {
|
||||
case 0: // Contains no "."; try to parse as int
|
||||
if i, err := strconv.ParseInt(s, 10, 64); err == nil {
|
||||
return i
|
||||
}
|
||||
case 1: // Contains a single "."; try to parse as float
|
||||
if f, err := strconv.ParseFloat(s, 64); err == nil {
|
||||
return f
|
||||
}
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// expired reports whether the token is expired.
|
||||
// t must be non-nil.
|
||||
func (t *Token) expired() bool {
|
||||
if t.Expiry.IsZero() {
|
||||
return false
|
||||
}
|
||||
return t.Expiry.Add(-expiryDelta).Before(time.Now())
|
||||
}
|
||||
|
||||
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
|
||||
func (t *Token) Valid() bool {
|
||||
return t != nil && t.AccessToken != "" && !t.expired()
|
||||
}
|
||||
|
||||
// tokenFromInternal maps an *internal.Token struct into
// a *Token struct. A nil input maps to a nil output.
func tokenFromInternal(t *internal.Token) *Token {
	if t == nil {
		return nil
	}
	return &Token{
		AccessToken:  t.AccessToken,
		TokenType:    t.TokenType,
		RefreshToken: t.RefreshToken,
		Expiry:       t.Expiry,
		raw:          t.Raw, // preserve server extras for Token.Extra
	}
}
|
||||
|
||||
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
	if err != nil {
		return nil, err
	}
	return tokenFromInternal(tk), nil
}
|
||||
72
Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
generated
vendored
Normal file
72
Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestTokenExtra verifies Extra lookups against a map-backed raw field,
// including a miss (different key) returning nil.
func TestTokenExtra(t *testing.T) {
	type testCase struct {
		key  string
		val  interface{}
		want interface{}
	}
	const key = "extra-key"
	cases := []testCase{
		{key: key, val: "abc", want: "abc"},
		{key: key, val: 123, want: 123},
		{key: key, val: "", want: ""},
		{key: "other-key", val: "def", want: nil},
	}
	for _, tc := range cases {
		extra := make(map[string]interface{})
		extra[tc.key] = tc.val
		tok := &Token{raw: extra}
		if got, want := tok.Extra(key), tc.want; got != want {
			t.Errorf("Extra(%q) = %q; want %q", key, got, want)
		}
	}
}
|
||||
|
||||
// TestTokenExpiry verifies expired() around the expiryDelta boundary:
// tokens within the delta (or in the past) count as expired.
func TestTokenExpiry(t *testing.T) {
	now := time.Now()
	cases := []struct {
		name string
		tok  *Token
		want bool
	}{
		{name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
		{name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
		{name: "-1 hour", tok: &Token{Expiry: now.Add(-1 * time.Hour)}, want: true},
	}
	for _, tc := range cases {
		if got, want := tc.tok.expired(), tc.want; got != want {
			t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
		}
	}
}
|
||||
|
||||
func TestTokenTypeMethod(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
tok *Token
|
||||
want string
|
||||
}{
|
||||
{name: "bearer-mixed_case", tok: &Token{TokenType: "beAREr"}, want: "Bearer"},
|
||||
{name: "default-bearer", tok: &Token{}, want: "Bearer"},
|
||||
{name: "basic", tok: &Token{TokenType: "basic"}, want: "Basic"},
|
||||
{name: "basic-capitalized", tok: &Token{TokenType: "Basic"}, want: "Basic"},
|
||||
{name: "mac", tok: &Token{TokenType: "mac"}, want: "MAC"},
|
||||
{name: "mac-caps", tok: &Token{TokenType: "MAC"}, want: "MAC"},
|
||||
{name: "mac-mixed_case", tok: &Token{TokenType: "mAc"}, want: "MAC"},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
if got, want := tc.tok.Type(), tc.want; got != want {
|
||||
t.Errorf("TokenType(%q) = %v; want %v", tc.name, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
132
Godeps/_workspace/src/golang.org/x/oauth2/transport.go
generated
vendored
Normal file
132
Godeps/_workspace/src/golang.org/x/oauth2/transport.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
|
||||
// wrapping a base RoundTripper and adding an Authorization header
|
||||
// with a token from the supplied Sources.
|
||||
//
|
||||
// Transport is a low-level mechanism. Most code will use the
|
||||
// higher-level Config.Client method instead.
|
||||
type Transport struct {
|
||||
// Source supplies the token to add to outgoing requests'
|
||||
// Authorization headers.
|
||||
Source TokenSource
|
||||
|
||||
// Base is the base RoundTripper used to make HTTP requests.
|
||||
// If nil, http.DefaultTransport is used.
|
||||
Base http.RoundTripper
|
||||
|
||||
mu sync.Mutex // guards modReq
|
||||
modReq map[*http.Request]*http.Request // original -> modified
|
||||
}
|
||||
|
||||
// RoundTrip authorizes and authenticates the request with an
|
||||
// access token. If no token exists or token is expired,
|
||||
// tries to refresh/fetch a new token.
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if t.Source == nil {
|
||||
return nil, errors.New("oauth2: Transport's Source is nil")
|
||||
}
|
||||
token, err := t.Source.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req2 := cloneRequest(req) // per RoundTripper contract
|
||||
token.SetAuthHeader(req2)
|
||||
t.setModReq(req, req2)
|
||||
res, err := t.base().RoundTrip(req2)
|
||||
if err != nil {
|
||||
t.setModReq(req, nil)
|
||||
return nil, err
|
||||
}
|
||||
res.Body = &onEOFReader{
|
||||
rc: res.Body,
|
||||
fn: func() { t.setModReq(req, nil) },
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// CancelRequest cancels an in-flight request by closing its connection.
|
||||
func (t *Transport) CancelRequest(req *http.Request) {
|
||||
type canceler interface {
|
||||
CancelRequest(*http.Request)
|
||||
}
|
||||
if cr, ok := t.base().(canceler); ok {
|
||||
t.mu.Lock()
|
||||
modReq := t.modReq[req]
|
||||
delete(t.modReq, req)
|
||||
t.mu.Unlock()
|
||||
cr.CancelRequest(modReq)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transport) base() http.RoundTripper {
|
||||
if t.Base != nil {
|
||||
return t.Base
|
||||
}
|
||||
return http.DefaultTransport
|
||||
}
|
||||
|
||||
func (t *Transport) setModReq(orig, mod *http.Request) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.modReq == nil {
|
||||
t.modReq = make(map[*http.Request]*http.Request)
|
||||
}
|
||||
if mod == nil {
|
||||
delete(t.modReq, orig)
|
||||
} else {
|
||||
t.modReq[orig] = mod
|
||||
}
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header, len(r.Header))
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = append([]string(nil), s...)
|
||||
}
|
||||
return r2
|
||||
}
|
||||
|
||||
// onEOFReader wraps a ReadCloser and runs fn exactly once, when the
// stream reaches EOF or is closed, whichever happens first.
type onEOFReader struct {
	rc io.ReadCloser // underlying stream
	fn func()        // callback; set to nil after it has run
}
|
||||
|
||||
func (r *onEOFReader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.rc.Read(p)
|
||||
if err == io.EOF {
|
||||
r.runFunc()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *onEOFReader) Close() error {
|
||||
err := r.rc.Close()
|
||||
r.runFunc()
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *onEOFReader) runFunc() {
|
||||
if fn := r.fn; fn != nil {
|
||||
fn()
|
||||
r.fn = nil
|
||||
}
|
||||
}
|
||||
86
Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
generated
vendored
Normal file
86
Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tokenSource struct{ token *Token }
|
||||
|
||||
func (t *tokenSource) Token() (*Token, error) {
|
||||
return t.token, nil
|
||||
}
|
||||
|
||||
func TestTransportTokenSource(t *testing.T) {
|
||||
ts := &tokenSource{
|
||||
token: &Token{
|
||||
AccessToken: "abc",
|
||||
},
|
||||
}
|
||||
tr := &Transport{
|
||||
Source: ts,
|
||||
}
|
||||
server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Header.Get("Authorization") != "Bearer abc" {
|
||||
t.Errorf("Transport doesn't set the Authorization header from the fetched token")
|
||||
}
|
||||
})
|
||||
defer server.Close()
|
||||
client := http.Client{Transport: tr}
|
||||
client.Get(server.URL)
|
||||
}
|
||||
|
||||
// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113
|
||||
func TestTransportTokenSourceTypes(t *testing.T) {
|
||||
const val = "abc"
|
||||
tests := []struct {
|
||||
key string
|
||||
val string
|
||||
want string
|
||||
}{
|
||||
{key: "bearer", val: val, want: "Bearer abc"},
|
||||
{key: "mac", val: val, want: "MAC abc"},
|
||||
{key: "basic", val: val, want: "Basic abc"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
ts := &tokenSource{
|
||||
token: &Token{
|
||||
AccessToken: tc.val,
|
||||
TokenType: tc.key,
|
||||
},
|
||||
}
|
||||
tr := &Transport{
|
||||
Source: ts,
|
||||
}
|
||||
server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
|
||||
if got, want := r.Header.Get("Authorization"), tc.want; got != want {
|
||||
t.Errorf("Authorization header (%q) = %q; want %q", val, got, want)
|
||||
}
|
||||
})
|
||||
defer server.Close()
|
||||
client := http.Client{Transport: tr}
|
||||
client.Get(server.URL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTokenValidNoAccessToken(t *testing.T) {
|
||||
token := &Token{}
|
||||
if token.Valid() {
|
||||
t.Errorf("Token should not be valid with no access token")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpiredWithExpiry(t *testing.T) {
|
||||
token := &Token{
|
||||
Expiry: time.Now().Add(-5 * time.Hour),
|
||||
}
|
||||
if token.Valid() {
|
||||
t.Errorf("Token should not be valid if it expired in the past")
|
||||
}
|
||||
}
|
||||
|
||||
func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(handler))
|
||||
}
|
||||
16
Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
generated
vendored
Normal file
16
Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package vk provides constants for using OAuth2 to access VK.com.
|
||||
package vk // import "golang.org/x/oauth2/vk"
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// Endpoint is VK's OAuth 2.0 endpoint.
|
||||
var Endpoint = oauth2.Endpoint{
|
||||
AuthURL: "https://oauth.vk.com/authorize",
|
||||
TokenURL: "https://oauth.vk.com/access_token",
|
||||
}
|
||||
11
Godeps/_workspace/src/google.golang.org/cloud/.travis.yml
generated
vendored
Normal file
11
Godeps/_workspace/src/google.golang.org/cloud/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.4
|
||||
- tip
|
||||
install:
|
||||
- go get -v google.golang.org/cloud/...
|
||||
script:
|
||||
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
|
||||
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
|
||||
go test -v -tags=integration google.golang.org/cloud/...
|
||||
12
Godeps/_workspace/src/google.golang.org/cloud/AUTHORS
generated
vendored
Normal file
12
Godeps/_workspace/src/google.golang.org/cloud/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# This is the official list of cloud authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Google Inc.
|
||||
Palm Stone Games, Inc.
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
||||
114
Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md
generated
vendored
Normal file
114
Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
# Contributing
|
||||
|
||||
1. Sign one of the contributor license agreements below.
|
||||
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
|
||||
1. Get the cloud package by running `go get -d google.golang.org/cloud`.
|
||||
1. If you have already checked out the source, make sure that the remote git
|
||||
origin is https://code.googlesource.com/gocloud:
|
||||
|
||||
git remote set-url origin https://code.googlesource.com/gocloud
|
||||
1. Make changes and create a change by running `git codereview change <name>`,
|
||||
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
|
||||
1. Keep amending the change and mailing it as you receive feedback.
|
||||
|
||||
## Integration Tests
|
||||
|
||||
In addition to the unit tests, you may run the integration test suite.
|
||||
|
||||
To run the integration tests, you must create and configure a project in the
|
||||
Google Developers Console. Once you have created the project, set the
|
||||
following environment variables to be able to run tests against the actual APIs.
|
||||
|
||||
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
|
||||
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
|
||||
|
||||
Create a storage bucket with the same name as the project id set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
|
||||
The storage integration test will create and delete some objects in this bucket.
|
||||
|
||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it
|
||||
to create the indexes used in the datastore integration tests with indexes
|
||||
found in `datastore/testdata/index.yaml`:
|
||||
|
||||
From the project's root directory:
|
||||
|
||||
``` sh
|
||||
# Install the app component
|
||||
$ gcloud components update app
|
||||
|
||||
# Set the default project in your env
|
||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Authenticate the gcloud tool with your account
|
||||
$ gcloud auth login
|
||||
|
||||
# Create the indexes
|
||||
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
|
||||
|
||||
```
|
||||
|
||||
You can run the integration tests by running:
|
||||
|
||||
``` sh
|
||||
$ go test -v -tags=integration google.golang.org/cloud/...
|
||||
```
|
||||
|
||||
## Contributor License Agreements
|
||||
|
||||
Before we can accept your pull requests you'll need to sign a Contributor
|
||||
License Agreement (CLA):
|
||||
|
||||
- **If you are an individual writing original source code** and **you own the
|
||||
  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
|
||||
- **If you work for a company that wants to allow you to contribute your work**,
|
||||
then you'll need to sign a [corporate CLA][corpcla].
|
||||
|
||||
You can sign these electronically (just scroll to the bottom). After that,
|
||||
we'll be able to accept your pull requests.
|
||||
|
||||
## Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
|
||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
|
||||
|
||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
|
||||
[indvcla]: https://developers.google.com/open-source/cla/individual
|
||||
[corpcla]: https://developers.google.com/open-source/cla/corporate
|
||||
24
Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS
generated
vendored
Normal file
24
Godeps/_workspace/src/google.golang.org/cloud/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# People who have agreed to one of the CLAs and can contribute patches.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# https://developers.google.com/open-source/cla/individual
|
||||
# https://developers.google.com/open-source/cla/corporate
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name <email address>
|
||||
|
||||
# Keep the list alphabetically sorted.
|
||||
|
||||
Andrew Gerrand <adg@golang.org>
|
||||
Brad Fitzpatrick <bradfitz@golang.org>
|
||||
Burcu Dogan <jbd@google.com>
|
||||
Dave Day <djd@golang.org>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Glenn Lewis <gmlewis@google.com>
|
||||
Johan Euphrosine <proppy@google.com>
|
||||
Luna Duclos <luna.duclos@palmstonegames.com>
|
||||
Michael McGreevy <mcgreevy@golang.org>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
||||
202
Godeps/_workspace/src/google.golang.org/cloud/LICENSE
generated
vendored
Normal file
202
Godeps/_workspace/src/google.golang.org/cloud/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2014 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
135
Godeps/_workspace/src/google.golang.org/cloud/README.md
generated
vendored
Normal file
135
Godeps/_workspace/src/google.golang.org/cloud/README.md
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
# Google Cloud for Go
|
||||
|
||||
[](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
|
||||
|
||||
**NOTE:** These packages are experimental, and may occasionally make
|
||||
backwards-incompatible changes.
|
||||
|
||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
|
||||
|
||||
Go packages for Google Cloud Platform services. Supported APIs include:
|
||||
|
||||
* Google Cloud Datastore
|
||||
* Google Cloud Storage
|
||||
* Google Cloud Pub/Sub
|
||||
* Google Cloud Container Engine
|
||||
|
||||
``` go
|
||||
import "google.golang.org/cloud"
|
||||
```
|
||||
|
||||
Documentation and examples are available at
|
||||
[https://godoc.org/google.golang.org/cloud](https://godoc.org/google.golang.org/cloud).
|
||||
|
||||
## Authorization
|
||||
|
||||
Authorization, throughout the package, is delegated to the golang.org/x/oauth2 package.
|
||||
Refer to the [godoc documentation](https://godoc.org/golang.org/x/oauth2)
|
||||
for examples on using oauth2 with the Cloud package.
|
||||
|
||||
## Google Cloud Datastore
|
||||
|
||||
[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully
|
||||
managed, schemaless database for storing non-relational data. Cloud Datastore
|
||||
automatically scales with your users and supports ACID transactions, high availability
|
||||
of reads and writes, strong consistency for reads and ancestor queries, and eventual
|
||||
consistency for all other queries.
|
||||
|
||||
Follow the [activation instructions][cloud-datastore-activation] to use the Google
|
||||
Cloud Datastore API with your project.
|
||||
|
||||
[https://godoc.org/google.golang.org/cloud/datastore](https://godoc.org/google.golang.org/cloud/datastore)
|
||||
|
||||
|
||||
```go
|
||||
type Post struct {
|
||||
Title string
|
||||
Body string `datastore:",noindex"`
|
||||
PublishedAt time.Time
|
||||
}
|
||||
keys := []*datastore.Key{
|
||||
datastore.NewKey(ctx, "Post", "post1", 0, nil),
|
||||
datastore.NewKey(ctx, "Post", "post2", 0, nil),
|
||||
}
|
||||
posts := []*Post{
|
||||
{Title: "Post 1", Body: "...", PublishedAt: time.Now()},
|
||||
{Title: "Post 2", Body: "...", PublishedAt: time.Now()},
|
||||
}
|
||||
if _, err := datastore.PutMulti(ctx, keys, posts); err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Google Cloud Storage
|
||||
|
||||
[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
|
||||
data on Google infrastructure with very high reliability, performance and availability,
|
||||
and can be used to distribute large data objects to users via direct download.
|
||||
|
||||
[https://godoc.org/google.golang.org/cloud/storage](https://godoc.org/google.golang.org/cloud/storage)
|
||||
|
||||
|
||||
```go
|
||||
// Read the object1 from bucket.
|
||||
rc, err := storage.NewReader(ctx, "bucket", "object1")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
slurp, err := ioutil.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Google Cloud Pub/Sub (Alpha)
|
||||
|
||||
> Google Cloud Pub/Sub is in **Alpha status**. As a result, it might change in
|
||||
> backward-incompatible ways and is not recommended for production use. It is not
|
||||
> subject to any SLA or deprecation policy.
|
||||
|
||||
[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
|
||||
your services with reliable, many-to-many, asynchronous messaging hosted on Google's
|
||||
infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
|
||||
for building your own robust, global services.
|
||||
|
||||
[https://godoc.org/google.golang.org/cloud/pubsub](https://godoc.org/google.golang.org/cloud/pubsub)
|
||||
|
||||
|
||||
```go
|
||||
// Publish "hello world" on topic1.
|
||||
msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
|
||||
Data: []byte("hello world"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
// Pull messages via subscription1.
|
||||
msgs, err := pubsub.Pull(ctx, "subscription1", 1)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.
|
||||
|
||||
Please note that this project is released with a Contributor Code of Conduct.
|
||||
By participating in this project you agree to abide by its terms.
|
||||
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
|
||||
for more information.
|
||||
|
||||
[cloud-datastore]: https://cloud.google.com/datastore/
|
||||
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
|
||||
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
|
||||
|
||||
[cloud-pubsub]: https://cloud.google.com/pubsub/
|
||||
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
|
||||
|
||||
[cloud-storage]: https://cloud.google.com/storage/
|
||||
[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
|
||||
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
|
||||
147
Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go
generated
vendored
Normal file
147
Godeps/_workspace/src/google.golang.org/cloud/bigquery/bigquery.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
// TODO(mcgreevy): support dry-run mode when creating jobs.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// A Source is a source of data for the Copy function.
type Source interface {
    implementsSource()
}

// A Destination is a destination of data for the Copy function.
type Destination interface {
    implementsDestination()
}

// An Option is an optional argument to Copy.
type Option interface {
    implementsOption()
}

// A ReadSource is a source of data for the Read function.
type ReadSource interface {
    implementsReadSource()
}

// A ReadOption is an optional argument to Read.
type ReadOption interface {
    customizeRead(conf *pagingConf)
}

// Scope is the OAuth2 scope required to perform BigQuery operations.
const Scope = "https://www.googleapis.com/auth/bigquery"

// Client may be used to perform BigQuery operations.
type Client struct {
    service   service // backend used to issue BigQuery API calls
    projectID string  // GCP project billed for operations made via this client
}
|
||||
|
||||
// Note: many of the methods on *Client appear in the various *_op.go source files.
|
||||
|
||||
// NewClient constructs a new Client which can perform BigQuery operations.
|
||||
// Operations performed via the client are billed to the specified GCP project.
|
||||
// The supplied http.Client is used for making requests to the BigQuery server and must be capable of
|
||||
// authenticating requests with Scope.
|
||||
func NewClient(client *http.Client, projectID string) (*Client, error) {
|
||||
bqService, err := newBigqueryService(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("constructing bigquery client: %v", err)
|
||||
}
|
||||
|
||||
c := &Client{
|
||||
service: bqService,
|
||||
projectID: projectID,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// initJobProto creates and returns a bigquery Job proto.
|
||||
// The proto is customized using any jobOptions in options.
|
||||
// The list of Options is returned with the jobOptions removed.
|
||||
func initJobProto(projectID string, options []Option) (*bq.Job, []Option) {
|
||||
job := &bq.Job{}
|
||||
|
||||
var other []Option
|
||||
for _, opt := range options {
|
||||
if o, ok := opt.(jobOption); ok {
|
||||
o.customizeJob(job, projectID)
|
||||
} else {
|
||||
other = append(other, opt)
|
||||
}
|
||||
}
|
||||
return job, other
|
||||
}
|
||||
|
||||
// Copy starts a BigQuery operation to copy data from a Source to a Destination.
|
||||
func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error) {
|
||||
switch dst := dst.(type) {
|
||||
case *Table:
|
||||
switch src := src.(type) {
|
||||
case *GCSReference:
|
||||
return c.load(ctx, dst, src, options)
|
||||
case *Table:
|
||||
return c.cp(ctx, dst, Tables{src}, options)
|
||||
case Tables:
|
||||
return c.cp(ctx, dst, src, options)
|
||||
case *Query:
|
||||
return c.query(ctx, dst, src, options)
|
||||
}
|
||||
case *GCSReference:
|
||||
if src, ok := src.(*Table); ok {
|
||||
return c.extract(ctx, dst, src, options)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("no Copy operation matches dst/src pair: dst: %T ; src: %T", dst, src)
|
||||
}
|
||||
|
||||
// Read fetches data from a ReadSource and returns the data via an Iterator.
|
||||
func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error) {
|
||||
switch src := src.(type) {
|
||||
case *Job:
|
||||
return c.readQueryResults(src, options)
|
||||
case *Query:
|
||||
return c.executeQuery(ctx, src, options...)
|
||||
case *Table:
|
||||
return c.readTable(src, options)
|
||||
}
|
||||
return nil, fmt.Errorf("src (%T) does not support the Read operation", src)
|
||||
}
|
||||
|
||||
// executeQuery submits a query for execution and returns the results via an Iterator.
|
||||
func (c *Client) executeQuery(ctx context.Context, q *Query, options ...ReadOption) (*Iterator, error) {
|
||||
dest := &Table{}
|
||||
job, err := c.Copy(ctx, dest, q, WriteTruncate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.Read(ctx, job, options...)
|
||||
}
|
||||
|
||||
func (c *Client) Dataset(id string) *Dataset {
|
||||
return &Dataset{
|
||||
id: id,
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
47
Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go
generated
vendored
Normal file
47
Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_op.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
type copyOption interface {
|
||||
customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string)
|
||||
}
|
||||
|
||||
func (c *Client) cp(ctx context.Context, dst *Table, src Tables, options []Option) (*Job, error) {
|
||||
job, options := initJobProto(c.projectID, options)
|
||||
payload := &bq.JobConfigurationTableCopy{}
|
||||
|
||||
dst.customizeCopyDst(payload, c.projectID)
|
||||
src.customizeCopySrc(payload, c.projectID)
|
||||
|
||||
for _, opt := range options {
|
||||
o, ok := opt.(copyOption)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
|
||||
}
|
||||
o.customizeCopy(payload, c.projectID)
|
||||
}
|
||||
|
||||
job.Configuration = &bq.JobConfiguration{
|
||||
Copy: payload,
|
||||
}
|
||||
return c.service.insertJob(ctx, job, c.projectID)
|
||||
}
|
||||
104
Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go
generated
vendored
Normal file
104
Godeps/_workspace/src/google.golang.org/cloud/bigquery/copy_test.go
generated
vendored
Normal file
@@ -0,0 +1,104 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultCopyJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Copy: &bq.JobConfigurationTableCopy{
|
||||
DestinationTable: &bq.TableReference{
|
||||
ProjectId: "d-project-id",
|
||||
DatasetId: "d-dataset-id",
|
||||
TableId: "d-table-id",
|
||||
},
|
||||
SourceTables: []*bq.TableReference{
|
||||
{
|
||||
ProjectId: "s-project-id",
|
||||
DatasetId: "s-dataset-id",
|
||||
TableId: "s-table-id",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopy verifies that Client.Copy builds the expected table-copy job proto,
// with and without disposition options.
func TestCopy(t *testing.T) {
    testCases := []struct {
        dst     *Table
        src     Tables
        options []Option
        want    *bq.Job
    }{
        // No options: the default copy job.
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            src: Tables{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            want: defaultCopyJob(),
        },
        // Disposition options must be reflected in the job configuration.
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            src: Tables{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            options: []Option{CreateNever, WriteTruncate},
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
                j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
                return j
            }(),
        },
    }

    for _, tc := range testCases {
        // testService records the job that Copy submits.
        s := &testService{}
        c := &Client{
            service: s,
        }
        if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
            t.Errorf("err calling cp: %v", err)
            continue
        }
        if !reflect.DeepEqual(s.Job, tc.want) {
            t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
        }
    }
}
|
||||
55
Godeps/_workspace/src/google.golang.org/cloud/bigquery/create_table_test.go
generated
vendored
Normal file
55
Godeps/_workspace/src/google.golang.org/cloud/bigquery/create_table_test.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// createTableRecorder is a service stub that records the createTableConf
// passed to createTable so tests can inspect it.
type createTableRecorder struct {
    conf *createTableConf
    service
}

// createTable captures conf and reports success without contacting a backend.
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
    rec.conf = conf
    return nil
}
|
||||
|
||||
func TestCreateTableOptions(t *testing.T) {
|
||||
s := &createTableRecorder{}
|
||||
c := &Client{
|
||||
service: s,
|
||||
}
|
||||
exp := time.Now()
|
||||
q := "query"
|
||||
if _, err := c.CreateTable(context.Background(), "p", "d", "t", TableExpiration(exp), ViewQuery(q)); err != nil {
|
||||
t.Fatalf("err calling CreateTable: %v", err)
|
||||
}
|
||||
want := createTableConf{
|
||||
projectID: "p",
|
||||
datasetID: "d",
|
||||
tableID: "t",
|
||||
expiration: exp,
|
||||
viewQuery: q,
|
||||
}
|
||||
if !reflect.DeepEqual(*s.conf, want) {
|
||||
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
|
||||
}
|
||||
}
|
||||
41
Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go
generated
vendored
Normal file
41
Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import "golang.org/x/net/context"
|
||||
|
||||
// Dataset is a reference to a BigQuery dataset.
|
||||
type Dataset struct {
|
||||
id string
|
||||
client *Client
|
||||
}
|
||||
|
||||
// ListTables returns a list of all the tables contained in the Dataset.
|
||||
func (d *Dataset) ListTables(ctx context.Context) ([]*Table, error) {
|
||||
var tables []*Table
|
||||
|
||||
err := getPages("", func(pageToken string) (string, error) {
|
||||
ts, tok, err := d.client.service.listTables(ctx, d.client.projectID, d.id, pageToken)
|
||||
if err == nil {
|
||||
tables = append(tables, ts...)
|
||||
}
|
||||
return tok, err
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tables, nil
|
||||
}
|
||||
105
Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go
generated
vendored
Normal file
105
Godeps/_workspace/src/google.golang.org/cloud/bigquery/dataset_test.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// listTablesServiceStub services listTables requests by returning tables from
// an in-memory list of pages, advancing one page per call.
type listTablesServiceStub struct {
    expectedProject, expectedDataset string
    values                           [][]*Table        // contains pages of tables.
    pageTokens                       map[string]string // maps incoming page token to returned page token.

    service
}

// listTables validates the project and dataset ids, then consumes and returns
// the next page of tables plus the follow-up token (empty token ends paging).
func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
    if projectID != s.expectedProject {
        return nil, "", errors.New("wrong project id")
    }
    if datasetID != s.expectedDataset {
        return nil, "", errors.New("wrong dataset id")
    }

    // NOTE(review): panics if invoked after all pages are consumed; tests must
    // configure pageTokens so paging stops before values is exhausted.
    tables := s.values[0]
    s.values = s.values[1:]
    return tables, s.pageTokens[pageToken], nil
}
|
||||
|
||||
// TestListTables verifies that ListTables concatenates all pages returned by
// the service, honoring the page-token chain.
func TestListTables(t *testing.T) {
    t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
    t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
    t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
    testCases := []struct {
        data       [][]*Table
        pageTokens map[string]string
        want       []*Table
    }{
        // Two pages chained via token "a": all tables are returned.
        {
            data:       [][]*Table{{t1, t2}, {t3}},
            pageTokens: map[string]string{"": "a", "a": ""},
            want:       []*Table{t1, t2, t3},
        },
        // Token chain ends after the first page: the second page is never fetched.
        {
            data:       [][]*Table{{t1, t2}, {t3}},
            pageTokens: map[string]string{"": ""}, // no more pages after first one.
            want:       []*Table{t1, t2},
        },
    }

    for _, tc := range testCases {
        c := &Client{
            service: &listTablesServiceStub{
                expectedProject: "x",
                expectedDataset: "y",
                values:          tc.data,
                pageTokens:      tc.pageTokens,
            },
            projectID: "x",
        }
        got, err := c.Dataset("y").ListTables(context.Background())
        if err != nil {
            t.Errorf("err calling ListTables: %v", err)
            continue
        }

        if !reflect.DeepEqual(got, tc.want) {
            t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
        }
    }
}
|
||||
|
||||
// TestListTablesError verifies that a service failure surfaces as a non-nil
// error from ListTables.
func TestListTablesError(t *testing.T) {
    c := &Client{
        service: &listTablesServiceStub{
            expectedProject: "x",
            expectedDataset: "y",
        },
        projectID: "x",
    }
    // Test that service read errors are propagated back to the caller.
    // Passing "not y" as the dataset id will cause the service to return an error.
    _, err := c.Dataset("not y").ListTables(context.Background())
    if err == nil {
        t.Errorf("ListTables expected: non-nil err, got: nil")
    }
}
|
||||
18
Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go
generated
vendored
Normal file
18
Godeps/_workspace/src/google.golang.org/cloud/bigquery/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package bigquery provides a client for the BigQuery service.
|
||||
//
|
||||
// Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
|
||||
package bigquery // import "google.golang.org/cloud/bigquery"
|
||||
42
Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go
generated
vendored
Normal file
42
Godeps/_workspace/src/google.golang.org/cloud/bigquery/error.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// An Error contains detailed information about an error encountered while processing a job.
|
||||
type Error struct {
|
||||
// Mirrors bq.ErrorProto, but drops DebugInfo
|
||||
Location, Message, Reason string
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
|
||||
}
|
||||
|
||||
func errorFromErrorProto(ep *bq.ErrorProto) *Error {
|
||||
if ep == nil {
|
||||
return nil
|
||||
}
|
||||
return &Error{
|
||||
Location: ep.Location,
|
||||
Message: ep.Message,
|
||||
Reason: ep.Reason,
|
||||
}
|
||||
}
|
||||
59
Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go
generated
vendored
Normal file
59
Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_op.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// extractOption is implemented by Options applicable to extract (table-to-GCS) jobs.
type extractOption interface {
    customizeExtract(conf *bq.JobConfigurationExtract, projectID string)
}

// DisableHeader returns an Option that disables the printing of a header row in exported data.
func DisableHeader() Option { return disableHeader{} }

type disableHeader struct{}

func (opt disableHeader) implementsOption() {}

// customizeExtract sets PrintHeader to false on the extract configuration.
func (opt disableHeader) customizeExtract(conf *bq.JobConfigurationExtract, projectID string) {
    f := false
    conf.PrintHeader = &f
}
|
||||
|
||||
func (c *Client) extract(ctx context.Context, dst *GCSReference, src *Table, options []Option) (*Job, error) {
|
||||
job, options := initJobProto(c.projectID, options)
|
||||
payload := &bq.JobConfigurationExtract{}
|
||||
|
||||
dst.customizeExtractDst(payload, c.projectID)
|
||||
src.customizeExtractSrc(payload, c.projectID)
|
||||
|
||||
for _, opt := range options {
|
||||
o, ok := opt.(extractOption)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
|
||||
}
|
||||
o.customizeExtract(payload, c.projectID)
|
||||
}
|
||||
|
||||
job.Configuration = &bq.JobConfiguration{
|
||||
Extract: payload,
|
||||
}
|
||||
return c.service.insertJob(ctx, job, c.projectID)
|
||||
}
|
||||
97
Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go
generated
vendored
Normal file
97
Godeps/_workspace/src/google.golang.org/cloud/bigquery/extract_test.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultExtractJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Extract: &bq.JobConfigurationExtract{
|
||||
SourceTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
DestinationUris: []string{"uri"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtract verifies that Client.Copy with a GCSReference destination builds
// the expected extract job proto, including header and format options.
func TestExtract(t *testing.T) {
    testCases := []struct {
        dst     *GCSReference
        src     *Table
        options []Option
        want    *bq.Job
    }{
        // No options: the default extract job.
        {
            dst:  defaultGCS,
            src:  defaultTable,
            want: defaultExtractJob(),
        },
        // DisableHeader must set PrintHeader to false.
        {
            dst: defaultGCS,
            src: defaultTable,
            options: []Option{
                DisableHeader(),
            },
            want: func() *bq.Job {
                j := defaultExtractJob()
                f := false
                j.Configuration.Extract.PrintHeader = &f
                return j
            }(),
        },
        // Output formatting fields on the GCSReference flow into the config.
        {
            dst: &GCSReference{
                uris:              []string{"uri"},
                Compression:       Gzip,
                DestinationFormat: JSON,
                FieldDelimiter:    "\t",
            },
            src: defaultTable,
            want: func() *bq.Job {
                j := defaultExtractJob()
                j.Configuration.Extract.Compression = "GZIP"
                j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
                j.Configuration.Extract.FieldDelimiter = "\t"
                return j
            }(),
        },
    }

    for _, tc := range testCases {
        // testService records the job that Copy submits.
        s := &testService{}
        c := &Client{
            service: s,
        }
        if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
            t.Errorf("err calling extract: %v", err)
            continue
        }
        if !reflect.DeepEqual(s.Job, tc.want) {
            t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
        }
    }
}
|
||||
112
Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go
generated
vendored
Normal file
112
Godeps/_workspace/src/google.golang.org/cloud/bigquery/gcs.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import bq "google.golang.org/api/bigquery/v2"
|
||||
|
||||
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
    // uris holds the gs:// URIs; set via NewGCSReference.
    uris []string

    // FieldDelimiter is the separator for fields in a CSV file, used when loading or exporting data.
    // The default is ",".
    FieldDelimiter string

    // The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
    SkipLeadingRows int64

    // SourceFormat is the format of the GCS data to be loaded into BigQuery.
    // Allowed values are: CSV, JSON, DatastoreBackup. The default is CSV.
    SourceFormat DataFormat
    // Encoding is the character encoding of the data. Only used when loading data.
    Encoding Encoding

    // Quote is the value used to quote data sections in a CSV file.
    // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset.
    // To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true.
    // Only used when loading data.
    Quote          string
    ForceZeroQuote bool

    // DestinationFormat is the format to use when writing exported files.
    // Allowed values are: CSV, Avro, JSON. The default is CSV.
    // CSV is not supported for tables with nested or repeated fields.
    DestinationFormat DataFormat
    // Compression is applied to written output. Only used when writing data. Default is None.
    Compression Compression
}

// GCSReference can act as either side of a Copy operation.
func (gcs *GCSReference) implementsSource()      {}
func (gcs *GCSReference) implementsDestination() {}
|
||||
|
||||
// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func (c *Client) NewGCSReference(uri ...string) *GCSReference {
    return &GCSReference{uris: uri}
}
|
||||
|
||||
type DataFormat string
|
||||
|
||||
const (
|
||||
CSV DataFormat = "CSV"
|
||||
Avro DataFormat = "AVRO"
|
||||
JSON DataFormat = "NEWLINE_DELIMITED_JSON"
|
||||
DatastoreBackup DataFormat = "DATASTORE_BACKUP"
|
||||
)
|
||||
|
||||
// Encoding specifies the character encoding of data to be loaded into BigQuery.
|
||||
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
|
||||
// for more details about how this is used.
|
||||
type Encoding string
|
||||
|
||||
const (
|
||||
UTF_8 Encoding = "UTF-8"
|
||||
ISO_8859_1 Encoding = "ISO-8859-1"
|
||||
)
|
||||
|
||||
// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
	// None specifies that exported files are not compressed.
	None Compression = "NONE"
	// Gzip specifies that exported files are gzip-compressed.
	Gzip Compression = "GZIP"
)
|
||||
|
||||
func (gcs *GCSReference) customizeLoadSrc(conf *bq.JobConfigurationLoad, projectID string) {
|
||||
conf.SourceUris = gcs.uris
|
||||
conf.SkipLeadingRows = gcs.SkipLeadingRows
|
||||
conf.SourceFormat = string(gcs.SourceFormat)
|
||||
conf.Encoding = string(gcs.Encoding)
|
||||
conf.FieldDelimiter = gcs.FieldDelimiter
|
||||
|
||||
if gcs.ForceZeroQuote {
|
||||
quote := ""
|
||||
conf.Quote = "e
|
||||
} else if gcs.Quote != "" {
|
||||
conf.Quote = &gcs.Quote
|
||||
}
|
||||
}
|
||||
|
||||
func (gcs *GCSReference) customizeExtractDst(conf *bq.JobConfigurationExtract, projectID string) {
|
||||
conf.DestinationUris = gcs.uris
|
||||
conf.Compression = string(gcs.Compression)
|
||||
conf.DestinationFormat = string(gcs.DestinationFormat)
|
||||
conf.FieldDelimiter = gcs.FieldDelimiter
|
||||
}
|
||||
168
Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go
generated
vendored
Normal file
168
Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// A pageFetcher returns a page of rows, starting from the row specified by token.
// An empty token requests the first page.
type pageFetcher interface {
	fetch(ctx context.Context, c *Client, token string) (*readDataResult, error)
}
|
||||
|
||||
// Iterator provides access to the result of a BigQuery lookup.
// Next must be called before the first call to Get.
type Iterator struct {
	c *Client // client used to issue fetches; may be nil in tests.

	err error // contains any error encountered during calls to Next.

	// Once Next has been called at least once, rs contains the current
	// page of data and nextToken contains the token for fetching the next
	// page (empty if there is no more data to be fetched).
	rs        [][]Value
	nextToken string

	// The remaining fields contain enough information to fetch the current
	// page of data, and determine which row of data from this page is the
	// current row.

	pf        pageFetcher
	pageToken string

	// The offset from the start of the current page to the current row.
	// For a new iterator, this is -1.
	offset int64
}
|
||||
|
||||
func newIterator(c *Client, pf pageFetcher) *Iterator {
|
||||
return &Iterator{
|
||||
c: c,
|
||||
pf: pf,
|
||||
offset: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// fetchPage loads the current page of data from the server.
|
||||
// The contents of rs and nextToken are replaced with the loaded data.
|
||||
// If there is an error while fetching, the error is stored in it.err and false is returned.
|
||||
func (it *Iterator) fetchPage(ctx context.Context) bool {
|
||||
var res *readDataResult
|
||||
var err error
|
||||
for {
|
||||
res, err = it.pf.fetch(ctx, it.c, it.pageToken)
|
||||
if err != errIncompleteJob {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
it.err = err
|
||||
return false
|
||||
}
|
||||
|
||||
it.rs = res.rows
|
||||
it.nextToken = res.pageToken
|
||||
return true
|
||||
}
|
||||
|
||||
// getEnoughData loads new data into rs until offset no longer points beyond the end of rs.
// It reports false when the offset cannot be satisfied (no more data, or a fetch failed).
func (it *Iterator) getEnoughData(ctx context.Context) bool {
	if len(it.rs) == 0 {
		// Either we have not yet fetched any pages, or we are iterating over an empty dataset.
		// In the former case, we should fetch a page of data, so that we can depend on the resultant nextToken.
		// In the latter case, it is harmless to fetch a page of data.
		if !it.fetchPage(ctx) {
			return false
		}
	}

	for it.offset >= int64(len(it.rs)) {
		// If offset is still outside the bounds of the loaded data,
		// but there are no more pages of data to fetch, then we have
		// failed to satisfy the offset.
		if it.nextToken == "" {
			return false
		}

		// offset cannot be satisfied with the currently loaded data,
		// so we fetch the next page. We no longer need the existing
		// cached rows, so we remove them and update the offset to be
		// relative to the new page that we're about to fetch.
		// NOTE: we can't just set offset to 0, because after
		// marshalling/unmarshalling, it's possible for the offset to
		// point arbitrarily far beyond the end of rs.
		// This can happen if the server returns a different size
		// results page before and after marshalling.
		it.offset -= int64(len(it.rs))
		it.pageToken = it.nextToken
		if !it.fetchPage(ctx) {
			return false
		}
	}
	return true
}
|
||||
|
||||
// Next advances the Iterator to the next row, making that row available
|
||||
// via the Get method.
|
||||
// Next must be called before the first call to Get, and blocks until data is available.
|
||||
// Next returns false when there are no more rows available, either because
|
||||
// the end of the output was reached, or because there was an error (consult
|
||||
// the Err method to determine which).
|
||||
func (it *Iterator) Next(ctx context.Context) bool {
|
||||
if it.err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Advance offset to where we want it to be for the next call to Get.
|
||||
it.offset++
|
||||
|
||||
// offset may now point beyond the end of rs, so we fetch data
|
||||
// until offset is within its bounds again. If there are no more
|
||||
// results available, offset will be left pointing beyond the bounds
|
||||
// of rs.
|
||||
// At the end of this method, rs will contain at least one element
|
||||
// unless the dataset we are iterating over is empty.
|
||||
return it.getEnoughData(ctx)
|
||||
}
|
||||
|
||||
// Err returns the last error encountered by Next, or nil for no error.
func (it *Iterator) Err() error {
	return it.err
}
|
||||
|
||||
// Get loads the current row into dst, which must implement ValueLoader.
|
||||
func (it *Iterator) Get(dst interface{}) error {
|
||||
if it.err != nil {
|
||||
return fmt.Errorf("Get called on iterator in error state: %v", it.err)
|
||||
}
|
||||
|
||||
// If Next has been called, then offset should always index into a
|
||||
// valid row in rs, as long as there is still data available.
|
||||
if it.offset >= int64(len(it.rs)) || it.offset < 0 {
|
||||
return errors.New("Get called without preceding successful call to Next")
|
||||
}
|
||||
|
||||
if dst, ok := dst.(ValueLoader); ok {
|
||||
return dst.Load(it.rs[it.offset])
|
||||
}
|
||||
return errors.New("Get called with unsupported argument type")
|
||||
}
|
||||
|
||||
// TODO(mcgreevy): Add a method to *Iterator that returns a schema which describes the data.
|
||||
415
Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go
generated
vendored
Normal file
415
Godeps/_workspace/src/google.golang.org/cloud/bigquery/iterator_test.go
generated
vendored
Normal file
@@ -0,0 +1,415 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// fetchResponse is a canned reply for one page-token lookup in pageFetcherStub.
type fetchResponse struct {
	result *readDataResult // The result to return.
	err    error           // The error to return.
}
|
||||
|
||||
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
	// fetchResponses maps a page token to the canned response for that token.
	fetchResponses map[string]fetchResponse

	// err records an unexpected token seen during fetch, for later inspection.
	err error
}
|
||||
|
||||
func (pf *pageFetcherStub) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) {
|
||||
call, ok := pf.fetchResponses[token]
|
||||
if !ok {
|
||||
pf.err = fmt.Errorf("Unexpected page token: %q", token)
|
||||
}
|
||||
return call.result, call.err
|
||||
}
|
||||
|
||||
// TestIterator exercises Next/Get across page boundaries: empty pages, fetch
// errors, and iterators whose offset was advanced before reading (table-driven).
func TestIterator(t *testing.T) {
	fetchFailure := errors.New("fetch failure")

	testCases := []struct {
		desc            string
		alreadyConsumed int64 // amount to advance offset before commencing reading.
		fetchResponses  map[string]fetchResponse
		want            []ValueList
		wantErr         error
	}{
		{
			desc: "Iteration over single empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{},
					},
				},
			},
			want: []ValueList{},
		},
		{
			desc: "Iteration over single page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: []ValueList{{1, 2}, {11, 12}},
		},
		{
			desc: "Iteration over two pages",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
		},
		{
			desc: "Server response includes empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "b",
						rows:      [][]Value{},
					},
				},
				"b": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want: []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
		},
		{
			desc: "Fetch error",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					// We return some data from this fetch, but also an error.
					// So the end result should include only data from the previous fetch.
					err: fetchFailure,
					result: &readDataResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want:    []ValueList{{1, 2}, {11, 12}},
			wantErr: fetchFailure,
		},
		{
			desc:            "Skip over a single element",
			alreadyConsumed: 1,
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want: []ValueList{{11, 12}, {101, 102}, {111, 112}},
		},
		{
			desc:            "Skip over an entire page",
			alreadyConsumed: 2,
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want: []ValueList{{101, 102}, {111, 112}},
		},
		{
			desc:            "Skip beyond start of second page",
			alreadyConsumed: 3,
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			want: []ValueList{{111, 112}},
		},
		{
			desc:            "Skip beyond all data",
			alreadyConsumed: 4,
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
				"a": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
					},
				},
			},
			// In this test case, Next will return false on its first call,
			// so we won't even attempt to call Get.
			want: []ValueList{},
		},
	}

	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newIterator(nil, pf)
		it.offset += tc.alreadyConsumed

		values, err := consumeIterator(it)
		if err != nil {
			t.Fatalf("%s: %v", tc.desc, err)
		}

		// The len guard avoids a DeepEqual mismatch between nil and empty slices.
		if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
		}
		if it.Err() != tc.wantErr {
			t.Errorf("%s: iterator.Err:\ngot: %v\nwant: %v", tc.desc, it.Err(), tc.wantErr)
		}
	}
}
|
||||
|
||||
// consumeIterator reads all values from an iterator and returns them.
|
||||
func consumeIterator(it *Iterator) ([]ValueList, error) {
|
||||
var got []ValueList
|
||||
for it.Next(context.Background()) {
|
||||
var vals ValueList
|
||||
if err := it.Get(&vals); err != nil {
|
||||
return nil, fmt.Errorf("err calling Get: %v", err)
|
||||
} else {
|
||||
got = append(got, vals)
|
||||
}
|
||||
}
|
||||
|
||||
return got, nil
|
||||
}
|
||||
|
||||
// TestGetBeforeNext verifies that Get fails when called before any Next.
func TestGetBeforeNext(t *testing.T) {
	// TODO: once marshalling/unmarshalling of iterators is implemented, do a similar test for unmarshalled iterators.
	pf := &pageFetcherStub{
		fetchResponses: map[string]fetchResponse{
			"": {
				result: &readDataResult{
					pageToken: "",
					rows:      [][]Value{{1, 2}, {11, 12}},
				},
			},
		},
	}
	it := newIterator(nil, pf)
	var vals ValueList
	if err := it.Get(&vals); err == nil {
		t.Errorf("Expected error calling Get before Next")
	}
}
|
||||
|
||||
// delayedPageFetcher wraps pageFetcherStub, reporting errIncompleteJob for the
// first delayCount fetches before delegating to the stub.
type delayedPageFetcher struct {
	pageFetcherStub
	delayCount int // number of fetches that return errIncompleteJob before succeeding.
}
|
||||
|
||||
func (pf *delayedPageFetcher) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) {
|
||||
if pf.delayCount > 0 {
|
||||
pf.delayCount--
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
return pf.pageFetcherStub.fetch(ctx, c, token)
|
||||
}
|
||||
|
||||
// TestIterateIncompleteJob verifies that the iterator transparently retries
// fetches that report errIncompleteJob until data is available.
func TestIterateIncompleteJob(t *testing.T) {
	want := []ValueList{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
	pf := pageFetcherStub{
		fetchResponses: map[string]fetchResponse{
			"": {
				result: &readDataResult{
					pageToken: "a",
					rows:      [][]Value{{1, 2}, {11, 12}},
				},
			},
			"a": {
				result: &readDataResult{
					pageToken: "",
					rows:      [][]Value{{101, 102}, {111, 112}},
				},
			},
		},
	}
	dpf := &delayedPageFetcher{
		pageFetcherStub: pf,
		delayCount:      1,
	}
	it := newIterator(nil, dpf)

	values, err := consumeIterator(it)
	if err != nil {
		t.Fatal(err)
	}

	// The len guard avoids a DeepEqual mismatch between nil and empty slices.
	if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
		t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
	}
	if it.Err() != nil {
		t.Fatalf("iterator.Err: got:\n%v", it.Err())
	}
	// All delayed fetches must have been retried through.
	if dpf.delayCount != 0 {
		t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
	}
}
|
||||
|
||||
// TestGetDuringErrorState verifies that Get fails once Next has recorded an error.
func TestGetDuringErrorState(t *testing.T) {
	pf := &pageFetcherStub{
		fetchResponses: map[string]fetchResponse{
			"": {err: errors.New("bang")},
		},
	}
	it := newIterator(nil, pf)
	var vals ValueList
	it.Next(context.Background())
	if it.Err() == nil {
		t.Errorf("Expected error after calling Next")
	}
	if err := it.Get(&vals); err == nil {
		t.Errorf("Expected error calling Get when iterator has a non-nil error.")
	}
}
|
||||
|
||||
// TestGetAfterFinished verifies that Get fails after the iterator is exhausted,
// including when the offset was advanced past all data before reading.
func TestGetAfterFinished(t *testing.T) {
	testCases := []struct {
		alreadyConsumed int64 // amount to advance offset before commencing reading.
		fetchResponses  map[string]fetchResponse
		want            []ValueList
	}{
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: []ValueList{{1, 2}, {11, 12}},
		},
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{},
					},
				},
			},
			want: []ValueList{},
		},
		{
			alreadyConsumed: 100,
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: []ValueList{},
		},
	}

	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newIterator(nil, pf)
		it.offset += tc.alreadyConsumed

		values, err := consumeIterator(it)
		if err != nil {
			t.Fatal(err)
		}

		// The len guard avoids a DeepEqual mismatch between nil and empty slices.
		if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
		}
		if it.Err() != nil {
			t.Fatalf("iterator.Err: got:\n%v\nwant:\n:nil", it.Err())
		}
		// Try calling Get again.
		var vals ValueList
		if err := it.Get(&vals); err == nil {
			t.Errorf("Expected error calling Get when there are no more values")
		}
	}
}
|
||||
124
Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go
generated
vendored
Normal file
124
Godeps/_workspace/src/google.golang.org/cloud/bigquery/job.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
	service   service
	projectID string
	jobID     string

	// isQuery records whether this is a query job; only query jobs can be read from.
	isQuery bool
}
|
||||
|
||||
// JobFromID creates a Job which refers to an existing BigQuery job. The job
|
||||
// need not have been created by this package. For example, the job may have
|
||||
// been created in the BigQuery console.
|
||||
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
|
||||
jobType, err := c.service.getJobType(ctx, c.projectID, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Job{
|
||||
service: c.service,
|
||||
projectID: c.projectID,
|
||||
jobID: id,
|
||||
isQuery: jobType == queryJobType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ID returns the BigQuery job ID of this Job.
func (j *Job) ID() string {
	return j.jobID
}
|
||||
|
||||
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int

const (
	// Pending is the initial state of a submitted job.
	Pending State = iota
	// Running indicates that the job is being processed.
	Running
	// Done indicates that processing has finished (successfully or not).
	Done
)
|
||||
|
||||
// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
	State State

	// err is the fatal error (if any) that caused the job to fail; exposed via Err.
	err error

	// All errors encountered during the running of the job.
	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
	Errors []*Error
}
|
||||
|
||||
// jobOption is an Option which modifies a bq.Job proto.
// This is used for configuring values that apply to all operations, such as setting a jobReference.
type jobOption interface {
	customizeJob(job *bq.Job, projectID string)
}
|
||||
|
||||
// jobID is the Option implementation behind JobID.
type jobID string

// JobID returns an Option that sets the job ID of a BigQuery job.
// If this Option is not used, a job ID is generated automatically.
func JobID(ID string) Option {
	return jobID(ID)
}
|
||||
|
||||
// implementsOption marks jobID as a valid Option.
func (opt jobID) implementsOption() {}
|
||||
|
||||
func (opt jobID) customizeJob(job *bq.Job, projectID string) {
|
||||
job.JobReference = &bq.JobReference{
|
||||
JobId: string(opt),
|
||||
ProjectId: projectID,
|
||||
}
|
||||
}
|
||||
|
||||
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
	return s.State == Done
}
|
||||
|
||||
// Err returns the error that caused the job to complete unsuccessfully (if any).
func (s *JobStatus) Err() error {
	return s.err
}
|
||||
|
||||
// Status returns the current status of the job. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
	return j.service.jobStatus(ctx, j.projectID, j.jobID)
}
|
||||
|
||||
// implementsReadSource marks Job as a valid source for read operations.
func (j *Job) implementsReadSource() {}
|
||||
|
||||
func (j *Job) customizeReadQuery(cursor *readQueryConf) error {
|
||||
// There are mulitple kinds of jobs, but only a query job is suitable for reading.
|
||||
if !j.isQuery {
|
||||
return errors.New("Cannot read from a non-query job")
|
||||
}
|
||||
|
||||
cursor.projectID = j.projectID
|
||||
cursor.jobID = j.jobID
|
||||
return nil
|
||||
}
|
||||
112
Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go
generated
vendored
Normal file
112
Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_op.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// loadOption is an Option that customizes a load job configuration.
type loadOption interface {
	customizeLoad(conf *bq.JobConfigurationLoad, projectID string)
}
|
||||
|
||||
// DestinationSchema returns an Option that specifies the schema to use when loading data into a new table.
// A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table.
// Caveat: DestinationSchema is not required if the data being loaded is a datastore backup.
// schema must not be nil.
func DestinationSchema(schema Schema) Option { return destSchema{Schema: schema} }

// destSchema is the loadOption implementation behind DestinationSchema.
type destSchema struct {
	Schema
}

// implementsOption marks destSchema as a valid Option.
func (opt destSchema) implementsOption() {}

// customizeLoad copies the schema into the load job configuration.
func (opt destSchema) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.Schema = opt.asTableSchema()
}
|
||||
|
||||
// MaxBadRecords returns an Option that sets the maximum number of bad records that will be ignored.
// If this maximum is exceeded, the operation will be unsuccessful.
func MaxBadRecords(n int64) Option { return maxBadRecords(n) }

// maxBadRecords is the loadOption implementation behind MaxBadRecords.
type maxBadRecords int64

// implementsOption marks maxBadRecords as a valid Option.
func (opt maxBadRecords) implementsOption() {}

// customizeLoad copies the threshold into the load job configuration.
func (opt maxBadRecords) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.MaxBadRecords = int64(opt)
}
|
||||
|
||||
// AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls.
func AllowJaggedRows() Option { return allowJaggedRows{} }

// allowJaggedRows is the loadOption implementation behind AllowJaggedRows.
type allowJaggedRows struct{}

// implementsOption marks allowJaggedRows as a valid Option.
func (opt allowJaggedRows) implementsOption() {}

// customizeLoad enables jagged-row tolerance on the load job configuration.
func (opt allowJaggedRows) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.AllowJaggedRows = true
}
|
||||
|
||||
// AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.
func AllowQuotedNewlines() Option { return allowQuotedNewlines{} }

// allowQuotedNewlines is the loadOption implementation behind AllowQuotedNewlines.
type allowQuotedNewlines struct{}

// implementsOption marks allowQuotedNewlines as a valid Option.
func (opt allowQuotedNewlines) implementsOption() {}

// customizeLoad enables quoted-newline support on the load job configuration.
func (opt allowQuotedNewlines) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.AllowQuotedNewlines = true
}
|
||||
|
||||
// IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated.
// Unknown values are ignored. For CSV this ignores extra values at the end of a line.
// For JSON this ignores named values that do not match any column name.
// If this Option is not used, records containing unknown values are treated as bad records.
// The MaxBadRecords Option can be used to customize how bad records are handled.
func IgnoreUnknownValues() Option { return ignoreUnknownValues{} }

// ignoreUnknownValues is the loadOption implementation behind IgnoreUnknownValues.
type ignoreUnknownValues struct{}

// implementsOption marks ignoreUnknownValues as a valid Option.
func (opt ignoreUnknownValues) implementsOption() {}

// customizeLoad enables unknown-value tolerance on the load job configuration.
func (opt ignoreUnknownValues) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
	conf.IgnoreUnknownValues = true
}
|
||||
|
||||
func (c *Client) load(ctx context.Context, dst *Table, src *GCSReference, options []Option) (*Job, error) {
|
||||
job, options := initJobProto(c.projectID, options)
|
||||
payload := &bq.JobConfigurationLoad{}
|
||||
|
||||
dst.customizeLoadDst(payload, c.projectID)
|
||||
src.customizeLoadSrc(payload, c.projectID)
|
||||
|
||||
for _, opt := range options {
|
||||
o, ok := opt.(loadOption)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
|
||||
}
|
||||
o.customizeLoad(payload, c.projectID)
|
||||
}
|
||||
|
||||
job.Configuration = &bq.JobConfiguration{
|
||||
Load: payload,
|
||||
}
|
||||
return c.service.insertJob(ctx, job, c.projectID)
|
||||
}
|
||||
198
Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go
generated
vendored
Normal file
198
Godeps/_workspace/src/google.golang.org/cloud/bigquery/load_test.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// defaultLoadJob returns the load-job proto expected when no options are set;
// test cases mutate a fresh copy to express their expectations.
func defaultLoadJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Load: &bq.JobConfigurationLoad{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				SourceUris: []string{"uri"},
			},
		},
	}
}
|
||||
|
||||
// stringFieldSchema returns a simple STRING field for schema fixtures.
func stringFieldSchema() *FieldSchema {
	return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}
|
||||
|
||||
// nestedFieldSchema returns a RECORD field containing one string field,
// used to exercise nested-schema conversion.
func nestedFieldSchema() *FieldSchema {
	return &FieldSchema{
		Name:   "nested",
		Type:   RecordFieldType,
		Schema: Schema{stringFieldSchema()},
	}
}
|
||||
|
||||
// bqStringFieldSchema is the bq-proto counterpart of stringFieldSchema.
func bqStringFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name: "fieldname",
		Type: "STRING",
	}
}
|
||||
|
||||
// bqNestedFieldSchema is the bq-proto counterpart of nestedFieldSchema.
func bqNestedFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name:   "nested",
		Type:   "RECORD",
		Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
	}
}
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
testCases := []struct {
|
||||
dst *Table
|
||||
src *GCSReference
|
||||
options []Option
|
||||
want *bq.Job
|
||||
}{
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: defaultGCS,
|
||||
want: defaultLoadJob(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: defaultGCS,
|
||||
options: []Option{
|
||||
MaxBadRecords(1),
|
||||
AllowJaggedRows(),
|
||||
AllowQuotedNewlines(),
|
||||
IgnoreUnknownValues(),
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.MaxBadRecords = 1
|
||||
j.Configuration.Load.AllowJaggedRows = true
|
||||
j.Configuration.Load.AllowQuotedNewlines = true
|
||||
j.Configuration.Load.IgnoreUnknownValues = true
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{
|
||||
ProjectID: "project-id",
|
||||
DatasetID: "dataset-id",
|
||||
TableID: "table-id",
|
||||
},
|
||||
options: []Option{CreateNever, WriteTruncate},
|
||||
src: defaultGCS,
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
|
||||
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{
|
||||
ProjectID: "project-id",
|
||||
DatasetID: "dataset-id",
|
||||
TableID: "table-id",
|
||||
},
|
||||
src: defaultGCS,
|
||||
options: []Option{
|
||||
DestinationSchema(Schema{
|
||||
stringFieldSchema(),
|
||||
nestedFieldSchema(),
|
||||
}),
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.Schema = &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqStringFieldSchema(),
|
||||
bqNestedFieldSchema(),
|
||||
}}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: &GCSReference{
|
||||
uris: []string{"uri"},
|
||||
SkipLeadingRows: 1,
|
||||
SourceFormat: JSON,
|
||||
Encoding: UTF_8,
|
||||
FieldDelimiter: "\t",
|
||||
Quote: "-",
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.SkipLeadingRows = 1
|
||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
|
||||
j.Configuration.Load.Encoding = "UTF-8"
|
||||
j.Configuration.Load.FieldDelimiter = "\t"
|
||||
hyphen := "-"
|
||||
j.Configuration.Load.Quote = &hyphen
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: &GCSReference{
|
||||
uris: []string{"uri"},
|
||||
Quote: "",
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.Quote = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: &GCSReference{
|
||||
uris: []string{"uri"},
|
||||
Quote: "",
|
||||
ForceZeroQuote: true,
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
empty := ""
|
||||
j.Configuration.Load.Quote = &empty
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
s := &testService{}
|
||||
c := &Client{
|
||||
service: s,
|
||||
}
|
||||
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
|
||||
t.Errorf("err calling load: %v", err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(s.Job, tc.want) {
|
||||
t.Errorf("loading: got:\n%v\nwant:\n%v", s.Job, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
42
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go
generated
vendored
Normal file
42
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import bq "google.golang.org/api/bigquery/v2"
|
||||
|
||||
// Query represents a query to be executed.
|
||||
type Query struct {
|
||||
// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
|
||||
Q string
|
||||
|
||||
// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
|
||||
// If DefaultProjectID is set, DefaultDatasetID must also be set.
|
||||
DefaultProjectID string
|
||||
DefaultDatasetID string
|
||||
}
|
||||
|
||||
func (q *Query) implementsSource() {}
|
||||
|
||||
func (q *Query) implementsReadSource() {}
|
||||
|
||||
func (q *Query) customizeQuerySrc(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
conf.Query = q.Q
|
||||
if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
|
||||
conf.DefaultDataset = &bq.DatasetReference{
|
||||
DatasetId: q.DefaultDatasetID,
|
||||
ProjectId: q.DefaultProjectID,
|
||||
}
|
||||
}
|
||||
}
|
||||
89
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go
generated
vendored
Normal file
89
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_op.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
type queryOption interface {
|
||||
customizeQuery(conf *bq.JobConfigurationQuery, projectID string)
|
||||
}
|
||||
|
||||
// DisableQueryCache returns an Option that prevents results being fetched from the query cache.
|
||||
// If this Option is not used, results are fetched from the cache if they are available.
|
||||
// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
|
||||
// Cached results are only available when TableID is unspecified in the query's destination Table.
|
||||
// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
|
||||
func DisableQueryCache() Option { return disableQueryCache{} }
|
||||
|
||||
type disableQueryCache struct{}
|
||||
|
||||
func (opt disableQueryCache) implementsOption() {}
|
||||
|
||||
func (opt disableQueryCache) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
f := false
|
||||
conf.UseQueryCache = &f
|
||||
}
|
||||
|
||||
// JobPriority returns an Option that causes a query to be scheduled with the specified priority.
|
||||
// The default priority is InteractivePriority.
|
||||
// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
|
||||
func JobPriority(priority string) Option { return jobPriority(priority) }
|
||||
|
||||
type jobPriority string
|
||||
|
||||
func (opt jobPriority) implementsOption() {}
|
||||
|
||||
func (opt jobPriority) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
conf.Priority = string(opt)
|
||||
}
|
||||
|
||||
const (
|
||||
BatchPriority = "BATCH"
|
||||
InteractivePriority = "INTERACTIVE"
|
||||
)
|
||||
|
||||
// TODO(mcgreevy): support large results.
|
||||
// TODO(mcgreevy): support non-flattened results.
|
||||
|
||||
func (c *Client) query(ctx context.Context, dst *Table, src *Query, options []Option) (*Job, error) {
|
||||
job, options := initJobProto(c.projectID, options)
|
||||
payload := &bq.JobConfigurationQuery{}
|
||||
|
||||
dst.customizeQueryDst(payload, c.projectID)
|
||||
src.customizeQuerySrc(payload, c.projectID)
|
||||
|
||||
for _, opt := range options {
|
||||
o, ok := opt.(queryOption)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("option (%#v) not applicable to dst/src pair: dst: %T ; src: %T", opt, dst, src)
|
||||
}
|
||||
o.customizeQuery(payload, c.projectID)
|
||||
}
|
||||
|
||||
job.Configuration = &bq.JobConfiguration{
|
||||
Query: payload,
|
||||
}
|
||||
j, err := c.service.insertJob(ctx, job, c.projectID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
j.isQuery = true
|
||||
return j, nil
|
||||
}
|
||||
118
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go
generated
vendored
Normal file
118
Godeps/_workspace/src/google.golang.org/cloud/bigquery/query_test.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultQueryJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Query: &bq.JobConfigurationQuery{
|
||||
DestinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
Query: "query string",
|
||||
DefaultDataset: &bq.DatasetReference{
|
||||
ProjectId: "def-project-id",
|
||||
DatasetId: "def-dataset-id",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuery(t *testing.T) {
|
||||
testCases := []struct {
|
||||
dst *Table
|
||||
src *Query
|
||||
options []Option
|
||||
want *bq.Job
|
||||
}{
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: defaultQuery,
|
||||
want: defaultQueryJob(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: &Query{
|
||||
Q: "query string",
|
||||
},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.DefaultDataset = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{},
|
||||
src: defaultQuery,
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.DestinationTable = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: &Table{
|
||||
ProjectID: "project-id",
|
||||
DatasetID: "dataset-id",
|
||||
TableID: "table-id",
|
||||
},
|
||||
src: defaultQuery,
|
||||
options: []Option{CreateNever, WriteTruncate},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
|
||||
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: defaultTable,
|
||||
src: defaultQuery,
|
||||
options: []Option{DisableQueryCache()},
|
||||
want: func() *bq.Job {
|
||||
j := defaultQueryJob()
|
||||
f := false
|
||||
j.Configuration.Query.UseQueryCache = &f
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
s := &testService{}
|
||||
c := &Client{
|
||||
service: s,
|
||||
}
|
||||
if _, err := c.Copy(context.Background(), tc.dst, tc.src, tc.options...); err != nil {
|
||||
t.Errorf("err calling query: %v", err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(s.Job, tc.want) {
|
||||
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
68
Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go
generated
vendored
Normal file
68
Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_op.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import "golang.org/x/net/context"
|
||||
|
||||
// RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.
|
||||
func RecordsPerRequest(n int64) ReadOption { return recordsPerRequest(n) }
|
||||
|
||||
type recordsPerRequest int64
|
||||
|
||||
func (opt recordsPerRequest) customizeRead(conf *pagingConf) {
|
||||
conf.recordsPerRequest = int64(opt)
|
||||
conf.setRecordsPerRequest = true
|
||||
}
|
||||
|
||||
// StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from.
|
||||
func StartIndex(i uint64) ReadOption { return startIndex(i) }
|
||||
|
||||
type startIndex uint64
|
||||
|
||||
func (opt startIndex) customizeRead(conf *pagingConf) {
|
||||
conf.startIndex = uint64(opt)
|
||||
}
|
||||
|
||||
func (conf *readTableConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) {
|
||||
return c.service.readTabledata(ctx, conf, token)
|
||||
}
|
||||
|
||||
func (c *Client) readTable(t *Table, options []ReadOption) (*Iterator, error) {
|
||||
conf := &readTableConf{}
|
||||
t.customizeReadSrc(conf)
|
||||
|
||||
for _, o := range options {
|
||||
o.customizeRead(&conf.paging)
|
||||
}
|
||||
|
||||
return newIterator(c, conf), nil
|
||||
}
|
||||
|
||||
func (conf *readQueryConf) fetch(ctx context.Context, c *Client, token string) (*readDataResult, error) {
|
||||
return c.service.readQuery(ctx, conf, token)
|
||||
}
|
||||
|
||||
func (c *Client) readQueryResults(job *Job, options []ReadOption) (*Iterator, error) {
|
||||
conf := &readQueryConf{}
|
||||
if err := job.customizeReadQuery(conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, o := range options {
|
||||
o.customizeRead(&conf.paging)
|
||||
}
|
||||
|
||||
return newIterator(c, conf), nil
|
||||
}
|
||||
308
Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go
generated
vendored
Normal file
308
Godeps/_workspace/src/google.golang.org/cloud/bigquery/read_test.go
generated
vendored
Normal file
@@ -0,0 +1,308 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type readTabledataArgs struct {
|
||||
conf *readTableConf
|
||||
tok string
|
||||
}
|
||||
|
||||
type readQueryArgs struct {
|
||||
conf *readQueryConf
|
||||
tok string
|
||||
}
|
||||
|
||||
// readServiceStub services read requests by returning data from an in-memory list of values.
|
||||
type readServiceStub struct {
|
||||
// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
|
||||
values [][][]Value // contains pages / rows / columns.
|
||||
pageTokens map[string]string // maps incoming page token to returned page token.
|
||||
|
||||
// arguments are recorded for later inspection.
|
||||
readTabledataCalls []readTabledataArgs
|
||||
readQueryCalls []readQueryArgs
|
||||
|
||||
service
|
||||
}
|
||||
|
||||
func (s *readServiceStub) readValues(tok string) *readDataResult {
|
||||
result := &readDataResult{
|
||||
pageToken: s.pageTokens[tok],
|
||||
rows: s.values[0],
|
||||
}
|
||||
s.values = s.values[1:]
|
||||
|
||||
return result
|
||||
}
|
||||
func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
|
||||
return s.readValues(token), nil
|
||||
}
|
||||
|
||||
func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
|
||||
s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
|
||||
return s.readValues(token), nil
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
// The data for the service stub to return is populated for each test case in the testCases for loop.
|
||||
service := &readServiceStub{}
|
||||
c := &Client{
|
||||
service: service,
|
||||
}
|
||||
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: service,
|
||||
isQuery: true,
|
||||
}
|
||||
|
||||
for _, src := range []ReadSource{defaultTable, queryJob} {
|
||||
testCases := []struct {
|
||||
data [][][]Value
|
||||
pageTokens map[string]string
|
||||
want []ValueList
|
||||
}{
|
||||
{
|
||||
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
|
||||
pageTokens: map[string]string{"": "a", "a": ""},
|
||||
want: []ValueList{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
|
||||
},
|
||||
{
|
||||
data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
|
||||
pageTokens: map[string]string{"": ""}, // no more pages after first one.
|
||||
want: []ValueList{{1, 2}, {11, 12}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
service.values = tc.data
|
||||
service.pageTokens = tc.pageTokens
|
||||
if got, ok := doRead(t, c, src); ok {
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// doRead calls Read with a ReadSource. Get is repeatedly called on the Iterator returned by Read and the results are returned.
|
||||
func doRead(t *testing.T, c *Client, src ReadSource) ([]ValueList, bool) {
|
||||
it, err := c.Read(context.Background(), src)
|
||||
if err != nil {
|
||||
t.Errorf("err calling Read: %v", err)
|
||||
return nil, false
|
||||
}
|
||||
var got []ValueList
|
||||
for it.Next(context.Background()) {
|
||||
var vals ValueList
|
||||
if err := it.Get(&vals); err != nil {
|
||||
t.Errorf("err calling Get: %v", err)
|
||||
return nil, false
|
||||
} else {
|
||||
got = append(got, vals)
|
||||
}
|
||||
}
|
||||
|
||||
return got, true
|
||||
}
|
||||
|
||||
func TestNoMoreValues(t *testing.T) {
|
||||
c := &Client{
|
||||
service: &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}, {11, 12}}},
|
||||
},
|
||||
}
|
||||
it, err := c.Read(context.Background(), defaultTable)
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
var vals ValueList
|
||||
// We expect to retrieve two values and then fail on the next attempt.
|
||||
if !it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: false: want: true")
|
||||
}
|
||||
if !it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: false: want: true")
|
||||
}
|
||||
if err := it.Get(&vals); err != nil {
|
||||
t.Fatalf("Get: got: %v: want: nil", err)
|
||||
}
|
||||
if it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: true: want: false")
|
||||
}
|
||||
if err := it.Get(&vals); err == nil {
|
||||
t.Fatalf("Get: got: %v: want: non-nil", err)
|
||||
}
|
||||
}
|
||||
|
||||
// delayedReadStub simulates reading results from a query that has not yet
|
||||
// completed. Its readQuery method initially reports that the query job is not
|
||||
// yet complete. Subsequently, it proxies the request through to another
|
||||
// service stub.
|
||||
type delayedReadStub struct {
|
||||
numDelays int
|
||||
|
||||
readServiceStub
|
||||
}
|
||||
|
||||
func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
|
||||
if s.numDelays > 0 {
|
||||
s.numDelays--
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
return s.readServiceStub.readQuery(ctx, conf, token)
|
||||
}
|
||||
|
||||
// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
|
||||
func TestIncompleteJob(t *testing.T) {
|
||||
service := &delayedReadStub{
|
||||
numDelays: 2,
|
||||
readServiceStub: readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
},
|
||||
}
|
||||
c := &Client{service: service}
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: service,
|
||||
isQuery: true,
|
||||
}
|
||||
it, err := c.Read(context.Background(), queryJob)
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
var got ValueList
|
||||
want := ValueList{1, 2}
|
||||
if !it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: false: want: true")
|
||||
}
|
||||
if err := it.Get(&got); err != nil {
|
||||
t.Fatalf("Error calling Get: %v", err)
|
||||
}
|
||||
if service.numDelays != 0 {
|
||||
t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
type errorReadService struct {
|
||||
service
|
||||
}
|
||||
|
||||
func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
|
||||
return nil, errors.New("bang!")
|
||||
}
|
||||
|
||||
func TestReadError(t *testing.T) {
|
||||
// test that service read errors are propagated back to the caller.
|
||||
c := &Client{service: &errorReadService{}}
|
||||
it, err := c.Read(context.Background(), defaultTable)
|
||||
if err != nil {
|
||||
// Read should not return an error; only Err should.
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
if it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: true: want: false")
|
||||
}
|
||||
if err := it.Err(); err.Error() != "bang!" {
|
||||
t.Fatalf("Get: got: %v: want: bang!", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadTabledataOptions(t *testing.T) {
|
||||
// test that read options are propagated.
|
||||
s := &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
}
|
||||
c := &Client{service: s}
|
||||
it, err := c.Read(context.Background(), defaultTable, RecordsPerRequest(5))
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
if !it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: false: want: true")
|
||||
}
|
||||
|
||||
want := []readTabledataArgs{{
|
||||
conf: &readTableConf{
|
||||
projectID: "project-id",
|
||||
datasetID: "dataset-id",
|
||||
tableID: "table-id",
|
||||
paging: pagingConf{
|
||||
recordsPerRequest: 5,
|
||||
setRecordsPerRequest: true,
|
||||
},
|
||||
},
|
||||
tok: "",
|
||||
}}
|
||||
|
||||
if !reflect.DeepEqual(s.readTabledataCalls, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadQueryOptions(t *testing.T) {
|
||||
// test that read options are propagated.
|
||||
s := &readServiceStub{
|
||||
values: [][][]Value{{{1, 2}}},
|
||||
}
|
||||
c := &Client{service: s}
|
||||
|
||||
queryJob := &Job{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
service: s,
|
||||
isQuery: true,
|
||||
}
|
||||
it, err := c.Read(context.Background(), queryJob, RecordsPerRequest(5))
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err calling Read: %v", err)
|
||||
}
|
||||
if !it.Next(context.Background()) {
|
||||
t.Fatalf("Next: got: false: want: true")
|
||||
}
|
||||
|
||||
want := []readQueryArgs{{
|
||||
conf: &readQueryConf{
|
||||
projectID: "project-id",
|
||||
jobID: "job-id",
|
||||
paging: pagingConf{
|
||||
recordsPerRequest: 5,
|
||||
setRecordsPerRequest: true,
|
||||
},
|
||||
},
|
||||
tok: "",
|
||||
}}
|
||||
|
||||
if !reflect.DeepEqual(s.readQueryCalls, want) {
|
||||
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
|
||||
}
|
||||
}
|
||||
106
Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go
generated
vendored
Normal file
106
Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import bq "google.golang.org/api/bigquery/v2"
|
||||
|
||||
// Schema describes the fields in a table or query result.
|
||||
type Schema []*FieldSchema
|
||||
|
||||
// TODO(mcgreevy): add a function to generate a schema from a struct.
|
||||
|
||||
type FieldSchema struct {
|
||||
// The field name.
|
||||
// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
|
||||
// and must start with a letter or underscore.
|
||||
// The maximum length is 128 characters.
|
||||
Name string
|
||||
|
||||
// A description of the field. The maximum length is 16,384 characters.
|
||||
Description string
|
||||
|
||||
// Whether the field may contain multiple values.
|
||||
Repeated bool
|
||||
// Whether the field is required. Ignored if Repeated is true.
|
||||
Required bool
|
||||
|
||||
// The field data type. If Type is Record, then this field contains a nested schema,
|
||||
// which is described by Schema.
|
||||
Type FieldType
|
||||
// Describes the nested schema if Type is set to Record.
|
||||
Schema Schema
|
||||
}
|
||||
|
||||
func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
|
||||
tfs := &bq.TableFieldSchema{
|
||||
Description: fs.Description,
|
||||
Name: fs.Name,
|
||||
Type: string(fs.Type),
|
||||
}
|
||||
|
||||
if fs.Repeated {
|
||||
tfs.Mode = "REPEATED"
|
||||
} else if fs.Required {
|
||||
tfs.Mode = "REQUIRED"
|
||||
} // else leave as default, which is interpreted as NULLABLE.
|
||||
|
||||
for _, f := range fs.Schema {
|
||||
tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
|
||||
}
|
||||
|
||||
return tfs
|
||||
}
|
||||
|
||||
func (s Schema) asTableSchema() *bq.TableSchema {
|
||||
var fields []*bq.TableFieldSchema
|
||||
for _, f := range s {
|
||||
fields = append(fields, f.asTableFieldSchema())
|
||||
}
|
||||
return &bq.TableSchema{Fields: fields}
|
||||
}
|
||||
|
||||
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
|
||||
fs := &FieldSchema{
|
||||
Description: tfs.Description,
|
||||
Name: tfs.Name,
|
||||
Repeated: tfs.Mode == "REPEATED",
|
||||
Required: tfs.Mode == "REQUIRED",
|
||||
Type: FieldType(tfs.Type),
|
||||
}
|
||||
|
||||
for _, f := range tfs.Fields {
|
||||
fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
|
||||
}
|
||||
return fs
|
||||
}
|
||||
|
||||
func convertTableSchema(ts *bq.TableSchema) Schema {
|
||||
var s Schema
|
||||
for _, f := range ts.Fields {
|
||||
s = append(s, convertTableFieldSchema(f))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
type FieldType string
|
||||
|
||||
const (
|
||||
StringFieldType FieldType = "STRING"
|
||||
IntegerFieldType FieldType = "INTEGER"
|
||||
FloatFieldType FieldType = "FLOAT"
|
||||
BooleanFieldType FieldType = "BOOLEAN"
|
||||
TimestampFieldType FieldType = "TIMESTAMP"
|
||||
RecordFieldType FieldType = "RECORD"
|
||||
)
|
||||
168
Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go
generated
vendored
Normal file
168
Godeps/_workspace/src/google.golang.org/cloud/bigquery/schema_test.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
|
||||
return &bq.TableFieldSchema{
|
||||
Description: desc,
|
||||
Name: name,
|
||||
Mode: mode,
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
|
||||
return &FieldSchema{
|
||||
Description: desc,
|
||||
Name: name,
|
||||
Repeated: repeated,
|
||||
Required: required,
|
||||
Type: FieldType(typ),
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchemaConversion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
schema Schema
|
||||
bqSchema *bq.TableSchema
|
||||
}{
|
||||
{
|
||||
// required
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "STRING", false, true),
|
||||
},
|
||||
},
|
||||
{
|
||||
// repeated
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "STRING", true, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// nullable, string
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "STRING", ""),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "STRING", false, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// integer
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "INTEGER", ""),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "INTEGER", false, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// float
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "FLOAT", ""),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "FLOAT", false, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// boolean
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "BOOLEAN", false, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// timestamp
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
fieldSchema("desc", "name", "TIMESTAMP", false, false),
|
||||
},
|
||||
},
|
||||
{
|
||||
// nested
|
||||
bqSchema: &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
&bq.TableFieldSchema{
|
||||
Description: "An outer schema wrapping a nested schema",
|
||||
Name: "outer",
|
||||
Mode: "REQUIRED",
|
||||
Type: "RECORD",
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqTableFieldSchema("inner field", "inner", "STRING", ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
schema: Schema{
|
||||
&FieldSchema{
|
||||
Description: "An outer schema wrapping a nested schema",
|
||||
Name: "outer",
|
||||
Required: true,
|
||||
Type: "RECORD",
|
||||
Schema: []*FieldSchema{
|
||||
&FieldSchema{
|
||||
Description: "inner field",
|
||||
Name: "inner",
|
||||
Type: "STRING",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
bqSchema := tc.schema.asTableSchema()
|
||||
if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
|
||||
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", bqSchema, tc.bqSchema)
|
||||
}
|
||||
schema := convertTableSchema(tc.bqSchema)
|
||||
if !reflect.DeepEqual(schema, tc.schema) {
|
||||
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
|
||||
}
|
||||
}
|
||||
}
|
||||
403
Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go
generated
vendored
Normal file
403
Godeps/_workspace/src/google.golang.org/cloud/bigquery/service.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// service provides an internal abstraction to isolate the generated
|
||||
// BigQuery API; most of this package uses this interface instead.
|
||||
// The single implementation, *bigqueryService, contains all the knowledge
|
||||
// of the generated BigQuery API.
|
||||
type service interface {
|
||||
// Jobs
|
||||
insertJob(ctx context.Context, job *bq.Job, projectId string) (*Job, error)
|
||||
getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
|
||||
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
|
||||
|
||||
// Queries
|
||||
|
||||
// readQuery reads data resulting from a query job. If the job is not
|
||||
// yet complete, an errIncompleteJob is returned. readQuery may be
|
||||
// called repeatedly to wait for results indefinitely.
|
||||
readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)
|
||||
|
||||
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
|
||||
|
||||
// Tables
|
||||
createTable(ctx context.Context, conf *createTableConf) error
|
||||
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
|
||||
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
|
||||
listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error)
|
||||
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
|
||||
}
|
||||
|
||||
type bigqueryService struct {
|
||||
s *bq.Service
|
||||
}
|
||||
|
||||
func newBigqueryService(client *http.Client) (*bigqueryService, error) {
|
||||
s, err := bq.New(client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("constructing bigquery client: %v", err)
|
||||
}
|
||||
|
||||
return &bigqueryService{s: s}, nil
|
||||
}
|
||||
|
||||
// getPages invokes getPage repeatedly until no pages remain.
// token is the page token to start from; pass "" to start at the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		next, err := getPage(token)
		if err != nil {
			return err
		}
		if next == "" {
			return nil
		}
		token = next
	}
}
|
||||
|
||||
func (s *bigqueryService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
|
||||
res, err := s.s.Jobs.Insert(projectID, job).Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
|
||||
}
|
||||
|
||||
type pagingConf struct {
|
||||
recordsPerRequest int64
|
||||
setRecordsPerRequest bool
|
||||
|
||||
startIndex uint64
|
||||
}
|
||||
|
||||
type readTableConf struct {
|
||||
projectID, datasetID, tableID string
|
||||
paging pagingConf
|
||||
schema Schema // lazily initialized when the first page of data is fetched.
|
||||
}
|
||||
|
||||
type readDataResult struct {
|
||||
pageToken string
|
||||
rows [][]Value
|
||||
totalRows uint64
|
||||
schema Schema
|
||||
}
|
||||
|
||||
type readQueryConf struct {
|
||||
projectID, jobID string
|
||||
paging pagingConf
|
||||
}
|
||||
|
||||
func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
|
||||
// Prepare request to fetch one page of table data.
|
||||
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
|
||||
|
||||
if pageToken != "" {
|
||||
req.PageToken(pageToken)
|
||||
} else {
|
||||
req.StartIndex(conf.paging.startIndex)
|
||||
}
|
||||
|
||||
if conf.paging.setRecordsPerRequest {
|
||||
req.MaxResults(conf.paging.recordsPerRequest)
|
||||
}
|
||||
|
||||
// Fetch the table schema in the background, if necessary.
|
||||
var schemaErr error
|
||||
var schemaFetch sync.WaitGroup
|
||||
if conf.schema == nil {
|
||||
schemaFetch.Add(1)
|
||||
go func() {
|
||||
defer schemaFetch.Done()
|
||||
var t *bq.Table
|
||||
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
|
||||
Fields("schema").
|
||||
Context(ctx).
|
||||
Do()
|
||||
if schemaErr == nil && t.Schema != nil {
|
||||
conf.schema = convertTableSchema(t.Schema)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
res, err := req.Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
schemaFetch.Wait()
|
||||
if schemaErr != nil {
|
||||
return nil, schemaErr
|
||||
}
|
||||
|
||||
result := &readDataResult{
|
||||
pageToken: res.PageToken,
|
||||
totalRows: uint64(res.TotalRows),
|
||||
schema: conf.schema,
|
||||
}
|
||||
result.rows, err = convertRows(res.Rows, conf.schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// errIncompleteJob signals that query results were requested before the
// job finished; callers may retry until the job completes.
var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")

// getQueryResultsTimeout bounds a single request to the BigQuery
// GetQueryResults endpoint. A long timeout does not add overall latency:
// the server responds as soon as results are available.
const getQueryResultsTimeout = time.Minute
|
||||
|
||||
func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
|
||||
req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
|
||||
TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)
|
||||
|
||||
if pageToken != "" {
|
||||
req.PageToken(pageToken)
|
||||
} else {
|
||||
req.StartIndex(conf.paging.startIndex)
|
||||
}
|
||||
|
||||
if conf.paging.setRecordsPerRequest {
|
||||
req.MaxResults(conf.paging.recordsPerRequest)
|
||||
}
|
||||
|
||||
res, err := req.Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !res.JobComplete {
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
schema := convertTableSchema(res.Schema)
|
||||
result := &readDataResult{
|
||||
pageToken: res.PageToken,
|
||||
totalRows: res.TotalRows,
|
||||
schema: schema,
|
||||
}
|
||||
result.rows, err = convertRows(res.Rows, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type jobType int
|
||||
|
||||
const (
|
||||
copyJobType jobType = iota
|
||||
extractJobType
|
||||
loadJobType
|
||||
queryJobType
|
||||
)
|
||||
|
||||
func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
|
||||
res, err := s.s.Jobs.Get(projectID, jobID).
|
||||
Fields("configuration").
|
||||
Context(ctx).
|
||||
Do()
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case res.Configuration.Copy != nil:
|
||||
return copyJobType, nil
|
||||
case res.Configuration.Extract != nil:
|
||||
return extractJobType, nil
|
||||
case res.Configuration.Load != nil:
|
||||
return loadJobType, nil
|
||||
case res.Configuration.Query != nil:
|
||||
return queryJobType, nil
|
||||
default:
|
||||
return 0, errors.New("unknown job type")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
|
||||
res, err := s.s.Jobs.Get(projectID, jobID).
|
||||
Fields("status"). // Only fetch what we need.
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jobStatusFromProto(res.Status)
|
||||
}
|
||||
|
||||
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
|
||||
|
||||
func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
|
||||
state, ok := stateMap[status.State]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected job state: %v", status.State)
|
||||
}
|
||||
|
||||
newStatus := &JobStatus{
|
||||
State: state,
|
||||
err: nil,
|
||||
}
|
||||
if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
|
||||
newStatus.err = err
|
||||
}
|
||||
|
||||
for _, ep := range status.Errors {
|
||||
newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
|
||||
}
|
||||
return newStatus, nil
|
||||
}
|
||||
|
||||
// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
|
||||
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID, pageToken string) ([]*Table, string, error) {
|
||||
var tables []*Table
|
||||
res, err := s.s.Tables.List(projectID, datasetID).
|
||||
PageToken(pageToken).
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
for _, t := range res.Tables {
|
||||
tables = append(tables, convertListedTable(t))
|
||||
}
|
||||
return tables, res.NextPageToken, nil
|
||||
}
|
||||
|
||||
type createTableConf struct {
|
||||
projectID, datasetID, tableID string
|
||||
expiration time.Time
|
||||
viewQuery string
|
||||
}
|
||||
|
||||
// createTable creates a table in the BigQuery service.
|
||||
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
|
||||
// If viewQuery is non-empty, the created table will be of type VIEW.
|
||||
// Note: expiration can only be set during table creation.
|
||||
// Note: after table creation, a view can be modified only if its table was initially created with a view.
|
||||
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
|
||||
table := &bq.Table{
|
||||
TableReference: &bq.TableReference{
|
||||
ProjectId: conf.projectID,
|
||||
DatasetId: conf.datasetID,
|
||||
TableId: conf.tableID,
|
||||
},
|
||||
}
|
||||
if !conf.expiration.IsZero() {
|
||||
table.ExpirationTime = conf.expiration.UnixNano() / 1000
|
||||
}
|
||||
if conf.viewQuery != "" {
|
||||
table.View = &bq.ViewDefinition{
|
||||
Query: conf.viewQuery,
|
||||
}
|
||||
}
|
||||
|
||||
_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
|
||||
table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bqTableToMetadata(table), nil
|
||||
}
|
||||
|
||||
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
|
||||
return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
|
||||
}
|
||||
|
||||
func bqTableToMetadata(t *bq.Table) *TableMetadata {
|
||||
md := &TableMetadata{
|
||||
Description: t.Description,
|
||||
Name: t.FriendlyName,
|
||||
Type: TableType(t.Type),
|
||||
ID: t.Id,
|
||||
NumBytes: t.NumBytes,
|
||||
NumRows: t.NumRows,
|
||||
}
|
||||
if t.ExpirationTime != 0 {
|
||||
md.ExpirationTime = time.Unix(0, t.ExpirationTime*1e6)
|
||||
}
|
||||
if t.CreationTime != 0 {
|
||||
md.CreationTime = time.Unix(0, t.CreationTime*1e6)
|
||||
}
|
||||
if t.LastModifiedTime != 0 {
|
||||
md.LastModifiedTime = time.Unix(0, int64(t.LastModifiedTime*1e6))
|
||||
}
|
||||
if t.Schema != nil {
|
||||
md.Schema = convertTableSchema(t.Schema)
|
||||
}
|
||||
if t.View != nil {
|
||||
md.View = t.View.Query
|
||||
}
|
||||
|
||||
return md
|
||||
}
|
||||
|
||||
func convertListedTable(t *bq.TableListTables) *Table {
|
||||
return &Table{
|
||||
ProjectID: t.TableReference.ProjectId,
|
||||
DatasetID: t.TableReference.DatasetId,
|
||||
TableID: t.TableReference.TableId,
|
||||
}
|
||||
}
|
||||
|
||||
// patchTableConf contains fields to be patched.
|
||||
type patchTableConf struct {
|
||||
// These fields are omitted from the patch operation if nil.
|
||||
Description *string
|
||||
Name *string
|
||||
}
|
||||
|
||||
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
|
||||
t := &bq.Table{}
|
||||
forceSend := func(field string) {
|
||||
t.ForceSendFields = append(t.ForceSendFields, field)
|
||||
}
|
||||
|
||||
if conf.Description != nil {
|
||||
t.Description = *conf.Description
|
||||
forceSend("Description")
|
||||
}
|
||||
if conf.Name != nil {
|
||||
t.FriendlyName = *conf.Name
|
||||
forceSend("FriendlyName")
|
||||
}
|
||||
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
|
||||
Context(ctx).
|
||||
Do()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bqTableToMetadata(table), nil
|
||||
}
|
||||
278
Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go
generated
vendored
Normal file
278
Godeps/_workspace/src/google.golang.org/cloud/bigquery/table.go
generated
vendored
Normal file
@@ -0,0 +1,278 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// A Table is a reference to a BigQuery table.
|
||||
type Table struct {
|
||||
// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
|
||||
// In this case the result will be stored in an ephemeral table.
|
||||
ProjectID string
|
||||
DatasetID string
|
||||
// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
|
||||
// The maximum length is 1,024 characters.
|
||||
TableID string
|
||||
|
||||
service service
|
||||
}
|
||||
|
||||
// TableMetadata contains information about a BigQuery table.
|
||||
type TableMetadata struct {
|
||||
Description string // The user-friendly description of this table.
|
||||
Name string // The user-friendly name for this table.
|
||||
Schema Schema
|
||||
View string
|
||||
|
||||
ID string // An opaque ID uniquely identifying the table.
|
||||
Type TableType
|
||||
|
||||
// The time when this table expires. If not set, the table will persist
|
||||
// indefinitely. Expired tables will be deleted and their storage reclaimed.
|
||||
ExpirationTime time.Time
|
||||
|
||||
CreationTime time.Time
|
||||
LastModifiedTime time.Time
|
||||
|
||||
// The size of the table in bytes.
|
||||
// This does not include data that is being buffered during a streaming insert.
|
||||
NumBytes int64
|
||||
|
||||
// The number of rows of data in this table.
|
||||
// This does not include data that is being buffered during a streaming insert.
|
||||
NumRows uint64
|
||||
}
|
||||
|
||||
// Tables is a group of tables. The tables may belong to differing projects or datasets.
|
||||
type Tables []*Table
|
||||
|
||||
// CreateDisposition specifies the circumstances under which destination table will be created.
|
||||
// Default is CreateIfNeeded.
|
||||
type TableCreateDisposition string
|
||||
|
||||
const (
|
||||
// The table will be created if it does not already exist. Tables are created atomically on successful completion of a job.
|
||||
CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
|
||||
|
||||
// The table must already exist and will not be automatically created.
|
||||
CreateNever TableCreateDisposition = "CREATE_NEVER"
|
||||
)
|
||||
|
||||
func CreateDisposition(disp TableCreateDisposition) Option { return disp }
|
||||
|
||||
func (opt TableCreateDisposition) implementsOption() {}
|
||||
|
||||
func (opt TableCreateDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
|
||||
conf.CreateDisposition = string(opt)
|
||||
}
|
||||
|
||||
func (opt TableCreateDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) {
|
||||
conf.CreateDisposition = string(opt)
|
||||
}
|
||||
|
||||
func (opt TableCreateDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
conf.CreateDisposition = string(opt)
|
||||
}
|
||||
|
||||
// TableWriteDisposition specifies how existing data in a destination table is treated.
|
||||
// Default is WriteAppend.
|
||||
type TableWriteDisposition string
|
||||
|
||||
const (
|
||||
// Data will be appended to any existing data in the destination table.
|
||||
// Data is appended atomically on successful completion of a job.
|
||||
WriteAppend TableWriteDisposition = "WRITE_APPEND"
|
||||
|
||||
// Existing data in the destination table will be overwritten.
|
||||
// Data is overwritten atomically on successful completion of a job.
|
||||
WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
|
||||
|
||||
// Writes will fail if the destination table already contains data.
|
||||
WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
|
||||
)
|
||||
|
||||
func WriteDisposition(disp TableWriteDisposition) Option { return disp }
|
||||
|
||||
func (opt TableWriteDisposition) implementsOption() {}
|
||||
|
||||
func (opt TableWriteDisposition) customizeLoad(conf *bq.JobConfigurationLoad, projectID string) {
|
||||
conf.WriteDisposition = string(opt)
|
||||
}
|
||||
|
||||
func (opt TableWriteDisposition) customizeCopy(conf *bq.JobConfigurationTableCopy, projectID string) {
|
||||
conf.WriteDisposition = string(opt)
|
||||
}
|
||||
|
||||
func (opt TableWriteDisposition) customizeQuery(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
conf.WriteDisposition = string(opt)
|
||||
}
|
||||
|
||||
// TableType is the type of table.
type TableType string

const (
	RegularTable TableType = "TABLE" // an ordinary data table
	ViewTable    TableType = "VIEW"  // a virtual table defined by a query
)
|
||||
|
||||
func (t *Table) implementsSource() {}
|
||||
func (t *Table) implementsReadSource() {}
|
||||
func (t *Table) implementsDestination() {}
|
||||
func (ts Tables) implementsSource() {}
|
||||
|
||||
func (t *Table) tableRefProto() *bq.TableReference {
|
||||
return &bq.TableReference{
|
||||
ProjectId: t.ProjectID,
|
||||
DatasetId: t.DatasetID,
|
||||
TableId: t.TableID,
|
||||
}
|
||||
}
|
||||
|
||||
// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
|
||||
func (t *Table) FullyQualifiedName() string {
|
||||
return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
|
||||
}
|
||||
|
||||
// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
|
||||
func (t *Table) implicitTable() bool {
|
||||
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
|
||||
}
|
||||
|
||||
func (t *Table) customizeLoadDst(conf *bq.JobConfigurationLoad, projectID string) {
|
||||
conf.DestinationTable = t.tableRefProto()
|
||||
}
|
||||
|
||||
func (t *Table) customizeExtractSrc(conf *bq.JobConfigurationExtract, projectID string) {
|
||||
conf.SourceTable = t.tableRefProto()
|
||||
}
|
||||
|
||||
func (t *Table) customizeCopyDst(conf *bq.JobConfigurationTableCopy, projectID string) {
|
||||
conf.DestinationTable = t.tableRefProto()
|
||||
}
|
||||
|
||||
func (ts Tables) customizeCopySrc(conf *bq.JobConfigurationTableCopy, projectID string) {
|
||||
for _, t := range ts {
|
||||
conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Table) customizeQueryDst(conf *bq.JobConfigurationQuery, projectID string) {
|
||||
if !t.implicitTable() {
|
||||
conf.DestinationTable = t.tableRefProto()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Table) customizeReadSrc(cursor *readTableConf) {
|
||||
cursor.projectID = t.ProjectID
|
||||
cursor.datasetID = t.DatasetID
|
||||
cursor.tableID = t.TableID
|
||||
}
|
||||
|
||||
// OpenTable creates a handle to an existing BigQuery table. If the table does not already exist, subsequent uses of the *Table will fail.
|
||||
func (c *Client) OpenTable(projectID, datasetID, tableID string) *Table {
|
||||
return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}
|
||||
}
|
||||
|
||||
// CreateTable creates a table in the BigQuery service and returns a handle to it.
|
||||
func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, options ...CreateTableOption) (*Table, error) {
|
||||
conf := &createTableConf{
|
||||
projectID: projectID,
|
||||
datasetID: datasetID,
|
||||
tableID: tableID,
|
||||
}
|
||||
for _, o := range options {
|
||||
o.customizeCreateTable(conf)
|
||||
}
|
||||
if err := c.service.createTable(ctx, conf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Table{ProjectID: projectID, DatasetID: datasetID, TableID: tableID, service: c.service}, nil
|
||||
}
|
||||
|
||||
// Metadata fetches the metadata for the table.
|
||||
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
|
||||
return t.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
|
||||
}
|
||||
|
||||
// Delete deletes the table.
|
||||
func (t *Table) Delete(ctx context.Context) error {
|
||||
return t.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
|
||||
}
|
||||
|
||||
// A CreateTableOption is an optional argument to CreateTable.
|
||||
type CreateTableOption interface {
|
||||
customizeCreateTable(*createTableConf)
|
||||
}
|
||||
|
||||
type tableExpiration time.Time
|
||||
|
||||
// TableExpiration returns a CreateTableOption which will cause the created table to be deleted after the expiration time.
|
||||
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }
|
||||
|
||||
func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
|
||||
conf.expiration = time.Time(opt)
|
||||
}
|
||||
|
||||
type viewQuery string
|
||||
|
||||
// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
|
||||
// For more information see: https://cloud.google.com/bigquery/querying-data#views
|
||||
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }
|
||||
|
||||
func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
|
||||
conf.viewQuery = string(opt)
|
||||
}
|
||||
|
||||
// TableMetadataPatch represents a set of changes to a table's metadata.
|
||||
type TableMetadataPatch struct {
|
||||
s service
|
||||
projectID, datasetID, tableID string
|
||||
conf patchTableConf
|
||||
}
|
||||
|
||||
// Patch returns a *TableMetadataPatch, which can be used to modify specific Table metadata fields.
|
||||
// In order to apply the changes, the TableMetadataPatch's Apply method must be called.
|
||||
func (t *Table) Patch() *TableMetadataPatch {
|
||||
return &TableMetadataPatch{
|
||||
s: t.service,
|
||||
projectID: t.ProjectID,
|
||||
datasetID: t.DatasetID,
|
||||
tableID: t.TableID,
|
||||
}
|
||||
}
|
||||
|
||||
// Description sets the table description.
|
||||
func (p *TableMetadataPatch) Description(desc string) {
|
||||
p.conf.Description = &desc
|
||||
}
|
||||
|
||||
// Name sets the table name.
|
||||
func (p *TableMetadataPatch) Name(name string) {
|
||||
p.conf.Name = &name
|
||||
}
|
||||
|
||||
// TODO(mcgreevy): support patching the schema.
|
||||
|
||||
// Apply applies the patch operation.
|
||||
func (p *TableMetadataPatch) Apply(ctx context.Context) (*TableMetadata, error) {
|
||||
return p.s.patchTable(ctx, p.projectID, p.datasetID, p.tableID, &p.conf)
|
||||
}
|
||||
51
Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go
generated
vendored
Normal file
51
Godeps/_workspace/src/google.golang.org/cloud/bigquery/utils_test.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
var defaultTable = &Table{
|
||||
ProjectID: "project-id",
|
||||
DatasetID: "dataset-id",
|
||||
TableID: "table-id",
|
||||
}
|
||||
|
||||
var defaultGCS = &GCSReference{
|
||||
uris: []string{"uri"},
|
||||
}
|
||||
|
||||
var defaultQuery = &Query{
|
||||
Q: "query string",
|
||||
DefaultProjectID: "def-project-id",
|
||||
DefaultDatasetID: "def-dataset-id",
|
||||
}
|
||||
|
||||
type testService struct {
|
||||
*bq.Job
|
||||
|
||||
service
|
||||
}
|
||||
|
||||
func (s *testService) insertJob(ctx context.Context, job *bq.Job, projectID string) (*Job, error) {
|
||||
s.Job = job
|
||||
return &Job{}, nil
|
||||
}
|
||||
|
||||
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
|
||||
return &JobStatus{State: Done}, nil
|
||||
}
|
||||
145
Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go
generated
vendored
Normal file
145
Godeps/_workspace/src/google.golang.org/cloud/bigquery/value.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See Iterator.Get for more information.
type ValueLoader interface {
	Load(v []Value) error
}

// ValueList converts a []Value to implement ValueLoader.
type ValueList []Value

// Load stores a sequence of values in a ValueList.
func (vs *ValueList) Load(v []Value) error {
	*vs = append(*vs, v...)
	return nil
}
|
||||
|
||||
// convertRows converts a series of TableRows into a series of Value slices.
|
||||
// schema is used to interpret the data from rows; its length must match the
|
||||
// length of each row.
|
||||
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
|
||||
var rs [][]Value
|
||||
for _, r := range rows {
|
||||
row, err := convertRow(r, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rs = append(rs, row)
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
|
||||
if len(schema) != len(r.F) {
|
||||
return nil, errors.New("schema length does not match row length")
|
||||
}
|
||||
var values []Value
|
||||
for i, cell := range r.F {
|
||||
fs := schema[i]
|
||||
v, err := convertValue(cell.V, fs.Type, fs.Schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values = append(values, v)
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// convertValue converts a single raw cell value, as decoded from the
// BigQuery JSON response, into a Value with a concrete Go type.
// typ and schema describe the field the value belongs to; schema is only
// consulted for record (nested) values.
func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
	switch val := val.(type) {
	case nil:
		// NULL cell.
		return nil, nil
	case []interface{}:
		// Repeated field: a list of wrapped values.
		return convertRepeatedRecord(val, typ, schema)
	case map[string]interface{}:
		// Nested record.
		return convertNestedRecord(val, schema)
	case string:
		// All scalar types arrive from the API encoded as strings.
		return convertBasicType(val, typ)
	default:
		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
	}
}
|
||||
|
||||
func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
|
||||
var values []Value
|
||||
for _, cell := range vals {
|
||||
// each cell contains a single entry, keyed by "v"
|
||||
val := cell.(map[string]interface{})["v"]
|
||||
v, err := convertValue(val, typ, schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values = append(values, v)
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
|
||||
// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.
|
||||
|
||||
// Nested records are wrapped in a map with a single key, "f".
|
||||
record := val["f"].([]interface{})
|
||||
if len(record) != len(schema) {
|
||||
return nil, errors.New("schema length does not match record length")
|
||||
}
|
||||
|
||||
var values []Value
|
||||
for i, cell := range record {
|
||||
// each cell contains a single entry, keyed by "v"
|
||||
val := cell.(map[string]interface{})["v"]
|
||||
|
||||
fs := schema[i]
|
||||
v, err := convertValue(val, fs.Type, fs.Schema)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values = append(values, v)
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// convertBasicType returns val as an interface with a concrete type specified by typ.
// All scalar values arrive from the BigQuery API encoded as strings and are
// parsed here; a parse failure is returned as the error.
func convertBasicType(val string, typ FieldType) (Value, error) {
	switch typ {
	case StringFieldType:
		return val, nil
	case IntegerFieldType:
		// NOTE(review): parsed as int, which is 32 bits on some platforms;
		// very large INTEGER values could overflow there — confirm whether
		// 64-bit parsing is required.
		return strconv.Atoi(val)
	case FloatFieldType:
		return strconv.ParseFloat(val, 64)
	case BooleanFieldType:
		return strconv.ParseBool(val)
	case TimestampFieldType:
		// Timestamps arrive as fractional seconds since the Unix epoch.
		// NOTE(review): the float64 round-trip can lose sub-microsecond
		// precision for large timestamps — confirm this is acceptable.
		f, err := strconv.ParseFloat(val, 64)
		return Value(time.Unix(0, int64(f*1e9))), err
	default:
		return nil, errors.New("unrecognized type")
	}
}
|
||||
325
Godeps/_workspace/src/google.golang.org/cloud/bigquery/value_test.go
generated
vendored
Normal file
325
Godeps/_workspace/src/google.golang.org/cloud/bigquery/value_test.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// TestConvertBasicValues verifies that string, integer, float and boolean
// cells are converted to the corresponding Go types.
func TestConvertBasicValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{"a", 1, 1.2, true}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
|
||||
// TestConvertTime verifies that a timestamp cell, sent as fractional Unix
// seconds, round-trips back to the original time.Time value.
func TestConvertTime(t *testing.T) {
	schema := []*FieldSchema{
		{Type: TimestampFieldType},
	}
	thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
	row := &bq.TableRow{
		F: []*bq.TableCell{
			// Encode the time as the API does: fractional seconds.
			{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	if !got[0].(time.Time).Equal(thyme) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
	}
}
|
||||
|
||||
// TestConvertNullValues verifies that a NULL cell converts to a nil Value.
func TestConvertNullValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: nil},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{nil}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
|
||||
// TestBasicRepetition verifies conversion of a repeated integer field,
// whose elements arrive wrapped in single-key ("v") maps.
func TestBasicRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{Type: IntegerFieldType, Repeated: true},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: []interface{}{
					map[string]interface{}{
						"v": "1",
					},
					map[string]interface{}{
						"v": "2",
					},
					map[string]interface{}{
						"v": "3",
					},
				},
			},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{1, 2, 3}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
|
||||
// TestNestedRecordContainingRepetition verifies conversion of a record
// field (an "f"-wrapped map) that itself contains a repeated integer field.
func TestNestedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type: RecordFieldType,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: map[string]interface{}{
					"f": []interface{}{
						map[string]interface{}{
							"v": []interface{}{
								map[string]interface{}{"v": "1"},
								map[string]interface{}{"v": "2"},
								map[string]interface{}{"v": "3"},
							},
						},
					},
				},
			},
		},
	}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{[]Value{1, 2, 3}}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
|
||||
// TestRepeatedRecordContainingRepetition verifies conversion of a repeated
// record field whose records each contain a repeated integer field,
// exercising two levels of the API's "v"/"f" wrapping.
func TestRepeatedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // only record (repeated ints)
								"v": []interface{}{ // pointless wrapper.
									map[string]interface{}{
										"v": "1",
									},
									map[string]interface{}{
										"v": "2",
									},
									map[string]interface{}{
										"v": "3",
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": []interface{}{
									map[string]interface{}{
										"v": "4",
									},
									map[string]interface{}{
										"v": "5",
									},
									map[string]interface{}{
										"v": "6",
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // the record is a list of length 1, containing an entry for the repeated integer field.
				[]Value{1, 2, 3}, // the repeated integer field is a list of length 3.
			},
			[]Value{ // second record
				[]Value{4, 5, 6},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
|
||||
// TestRepeatedRecordContainingRecord verifies conversion of a repeated
// record field whose records each contain a string field and a nested
// record, exercising record-in-record "v"/"f" wrapping.
func TestRepeatedRecordContainingRecord(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{
					Type: StringFieldType,
				},
				{
					Type: RecordFieldType,
					Schema: Schema{
						{Type: IntegerFieldType},
						{Type: StringFieldType},
					},
				},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // first record field (name)
								"v": "first repeated record",
							},
							map[string]interface{}{ // second record field (nested record).
								"v": map[string]interface{}{ // pointless single-key-map wrapper.
									"f": []interface{}{ // nested record fields
										map[string]interface{}{
											"v": "1",
										},
										map[string]interface{}{
											"v": "two",
										},
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": "second repeated record",
							},
							map[string]interface{}{
								"v": map[string]interface{}{
									"f": []interface{}{
										map[string]interface{}{
											"v": "3",
										},
										map[string]interface{}{
											"v": "four",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	// TODO: test with flattenresults.
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // record contains a string followed by a nested record.
				"first repeated record",
				[]Value{
					1,
					"two",
				},
			},
			[]Value{ // second record.
				"second repeated record",
				[]Value{
					3,
					"four",
				},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
	}
}
|
||||
267
Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go
generated
vendored
Normal file
267
Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin.go
generated
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud"
|
||||
btcspb "google.golang.org/cloud/bigtable/internal/cluster_service_proto"
|
||||
bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto"
|
||||
"google.golang.org/cloud/internal/transport"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// adminAddr is the production endpoint of the Bigtable table admin service.
const adminAddr = "bigtabletableadmin.googleapis.com:443"

// AdminClient is a client type for performing admin operations within a specific cluster.
type AdminClient struct {
	// conn is the underlying gRPC connection; it is closed by Close.
	conn *grpc.ClientConn
	// tClient is the generated table admin service stub.
	tClient bttspb.BigtableTableServiceClient

	// project, zone and cluster identify the cluster being administered.
	project, zone, cluster string
}
|
||||
|
||||
// NewAdminClient creates a new AdminClient for a given project, zone and cluster.
|
||||
func NewAdminClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*AdminClient, error) {
|
||||
o := []cloud.ClientOption{
|
||||
cloud.WithEndpoint(adminAddr),
|
||||
cloud.WithScopes(AdminScope),
|
||||
cloud.WithUserAgent(clientUserAgent),
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &AdminClient{
|
||||
conn: conn,
|
||||
tClient: bttspb.NewBigtableTableServiceClient(conn),
|
||||
|
||||
project: project,
|
||||
zone: zone,
|
||||
cluster: cluster,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the AdminClient.
func (ac *AdminClient) Close() {
	// Closing the shared gRPC connection terminates all in-flight RPCs.
	ac.conn.Close()
}
|
||||
|
||||
func (ac *AdminClient) clusterPrefix() string {
|
||||
return fmt.Sprintf("projects/%s/zones/%s/clusters/%s", ac.project, ac.zone, ac.cluster)
|
||||
}
|
||||
|
||||
// Tables returns a list of the tables in the cluster.
|
||||
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.ListTablesRequest{
|
||||
Name: prefix,
|
||||
}
|
||||
res, err := ac.tClient.ListTables(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := make([]string, 0, len(res.Tables))
|
||||
for _, tbl := range res.Tables {
|
||||
names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// CreateTable creates a new table in the cluster.
|
||||
// This method may return before the table's creation is complete.
|
||||
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.CreateTableRequest{
|
||||
Name: prefix,
|
||||
TableId: table,
|
||||
}
|
||||
_, err := ac.tClient.CreateTable(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateColumnFamily creates a new column family in a table.
|
||||
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
|
||||
// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.CreateColumnFamilyRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
ColumnFamilyId: family,
|
||||
}
|
||||
_, err := ac.tClient.CreateColumnFamily(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteTable deletes a table and all of its data.
|
||||
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.DeleteTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
_, err := ac.tClient.DeleteTable(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteColumnFamily deletes a column family in a table and all of its data.
|
||||
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.DeleteColumnFamilyRequest{
|
||||
Name: prefix + "/tables/" + table + "/columnFamilies/" + family,
|
||||
}
|
||||
_, err := ac.tClient.DeleteColumnFamily(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// TableInfo represents information about a table.
type TableInfo struct {
	// Families lists the names of the table's column families.
	// Order is not specified (it derives from map iteration).
	Families []string
}
|
||||
|
||||
// TableInfo retrieves information about a table.
|
||||
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
|
||||
prefix := ac.clusterPrefix()
|
||||
req := &bttspb.GetTableRequest{
|
||||
Name: prefix + "/tables/" + table,
|
||||
}
|
||||
res, err := ac.tClient.GetTable(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ti := &TableInfo{}
|
||||
for fam := range res.ColumnFamilies {
|
||||
ti.Families = append(ti.Families, fam)
|
||||
}
|
||||
return ti, nil
|
||||
}
|
||||
|
||||
// SetGCPolicy specifies which cells in a column family should be garbage collected.
// GC executes opportunistically in the background; table reads may return data
// matching the GC policy.
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
	prefix := ac.clusterPrefix()
	// Read-modify-write: fetch the current column family proto, set its GC
	// rule, and send the updated proto back.
	// NOTE(review): there is no concurrency control here, so a concurrent
	// change to the same family could be overwritten — confirm acceptable.
	tbl, err := ac.tClient.GetTable(ctx, &bttspb.GetTableRequest{
		Name: prefix + "/tables/" + table,
	})
	if err != nil {
		return err
	}
	fam, ok := tbl.ColumnFamilies[family]
	if !ok {
		return fmt.Errorf("unknown column family %q", family)
	}
	fam.GcRule = policy.proto()
	_, err = ac.tClient.UpdateColumnFamily(ctx, fam)
	return err
}
|
||||
|
||||
// clusterAdminAddr is the production endpoint of the cluster admin service.
const clusterAdminAddr = "bigtableclusteradmin.googleapis.com:443"

// ClusterAdminClient is a client type for performing admin operations on clusters.
// These operations can be substantially more dangerous than those provided by AdminClient.
type ClusterAdminClient struct {
	// conn is the underlying gRPC connection; it is closed by Close.
	conn *grpc.ClientConn
	// cClient is the generated cluster admin service stub.
	cClient btcspb.BigtableClusterServiceClient

	// project is the GCP project whose clusters are administered.
	project string
}
|
||||
|
||||
// NewClusterAdminClient creates a new ClusterAdminClient for a given project.
|
||||
func NewClusterAdminClient(ctx context.Context, project string, opts ...cloud.ClientOption) (*ClusterAdminClient, error) {
|
||||
o := []cloud.ClientOption{
|
||||
cloud.WithEndpoint(clusterAdminAddr),
|
||||
cloud.WithScopes(ClusterAdminScope),
|
||||
cloud.WithUserAgent(clientUserAgent),
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &ClusterAdminClient{
|
||||
conn: conn,
|
||||
cClient: btcspb.NewBigtableClusterServiceClient(conn),
|
||||
|
||||
project: project,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the ClusterAdminClient.
func (cac *ClusterAdminClient) Close() {
	// Closing the shared gRPC connection terminates all in-flight RPCs.
	cac.conn.Close()
}
|
||||
|
||||
// ClusterInfo represents information about a cluster, as reported by the
// cluster admin service (see Clusters).
type ClusterInfo struct {
	Name        string // name of the cluster
	Zone        string // GCP zone of the cluster (e.g. "us-central1-a")
	DisplayName string // display name for UIs
	ServeNodes  int    // number of allocated serve nodes
}
|
||||
|
||||
// clusterNameRegexp matches fully-qualified cluster resource names of the
// form projects/<project>/zones/<zone>/clusters/<cluster-id>.
var clusterNameRegexp = regexp.MustCompile(`^projects/([^/]+)/zones/([^/]+)/clusters/([a-z][-a-z0-9]*)$`)

// Clusters returns a list of clusters in the project.
func (cac *ClusterAdminClient) Clusters(ctx context.Context) ([]*ClusterInfo, error) {
	req := &btcspb.ListClustersRequest{
		Name: "projects/" + cac.project,
	}
	res, err := cac.cClient.ListClusters(ctx, req)
	if err != nil {
		return nil, err
	}
	// TODO(dsymonds): Deal with failed_zones.
	var cis []*ClusterInfo
	for _, c := range res.Clusters {
		// Parse the zone and short cluster name out of the resource name.
		m := clusterNameRegexp.FindStringSubmatch(c.Name)
		if m == nil {
			return nil, fmt.Errorf("malformed cluster name %q", c.Name)
		}
		cis = append(cis, &ClusterInfo{
			Name:        m[3],
			Zone:        m[2],
			DisplayName: c.DisplayName,
			ServeNodes:  int(c.ServeNodes),
		})
	}
	return cis, nil
}
|
||||
|
||||
/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API.
|
||||
|
||||
// SetClusterSize sets the number of server nodes for this cluster.
|
||||
func (ac *AdminClient) SetClusterSize(ctx context.Context, nodes int) error {
|
||||
req := &btcspb.GetClusterRequest{
|
||||
Name: ac.clusterPrefix(),
|
||||
}
|
||||
clu, err := ac.cClient.GetCluster(ctx, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clu.ServeNodes = int32(nodes)
|
||||
_, err = ac.cClient.UpdateCluster(ctx, clu)
|
||||
return err
|
||||
}
|
||||
|
||||
*/
|
||||
59
Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go
generated
vendored
Normal file
59
Godeps/_workspace/src/google.golang.org/cloud/bigtable/admin_test.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud"
|
||||
"google.golang.org/cloud/bigtable/bttest"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func TestAdminIntegration(t *testing.T) {
|
||||
srv, err := bttest.NewServer()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer srv.Close()
|
||||
t.Logf("bttest.Server running on %s", srv.Addr)
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
|
||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
|
||||
if err != nil {
|
||||
t.Fatalf("grpc.Dial: %v", err)
|
||||
}
|
||||
|
||||
adminClient, err := NewAdminClient(ctx, "proj", "zone", "cluster", cloud.WithBaseGRPC(conn))
|
||||
if err != nil {
|
||||
t.Fatalf("NewAdminClient: %v", err)
|
||||
}
|
||||
defer adminClient.Close()
|
||||
|
||||
list := func() []string {
|
||||
tbls, err := adminClient.Tables(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Fetching list of tables: %v", err)
|
||||
}
|
||||
sort.Strings(tbls)
|
||||
return tbls
|
||||
}
|
||||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
if got, want := list(), []string{"myothertable", "mytable"}; !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
|
||||
t.Fatalf("Deleting table: %v", err)
|
||||
}
|
||||
if got, want := list(), []string{"mytable"}; !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
|
||||
}
|
||||
}
|
||||
529
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go
generated
vendored
Normal file
529
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable.go
generated
vendored
Normal file
@@ -0,0 +1,529 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable // import "google.golang.org/cloud/bigtable"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud"
|
||||
btdpb "google.golang.org/cloud/bigtable/internal/data_proto"
|
||||
btspb "google.golang.org/cloud/bigtable/internal/service_proto"
|
||||
"google.golang.org/cloud/internal/transport"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// prodAddr is the production endpoint of the Bigtable data service.
const prodAddr = "bigtable.googleapis.com:443"

// Client is a client for reading and writing data to tables in a cluster.
type Client struct {
	// conn is the underlying gRPC connection; it is closed by Close.
	conn *grpc.ClientConn
	// client is the generated Bigtable data service stub.
	client btspb.BigtableServiceClient

	// project, zone and cluster identify the cluster holding the tables.
	project, zone, cluster string
}
|
||||
|
||||
// NewClient creates a new Client for a given project, zone and cluster.
|
||||
func NewClient(ctx context.Context, project, zone, cluster string, opts ...cloud.ClientOption) (*Client, error) {
|
||||
o := []cloud.ClientOption{
|
||||
cloud.WithEndpoint(prodAddr),
|
||||
cloud.WithScopes(Scope),
|
||||
cloud.WithUserAgent(clientUserAgent),
|
||||
}
|
||||
o = append(o, opts...)
|
||||
conn, err := transport.DialGRPC(ctx, o...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("dialing: %v", err)
|
||||
}
|
||||
return &Client{
|
||||
conn: conn,
|
||||
client: btspb.NewBigtableServiceClient(conn),
|
||||
|
||||
project: project,
|
||||
zone: zone,
|
||||
cluster: cluster,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the Client.
func (c *Client) Close() {
	// Closing the shared gRPC connection terminates all in-flight RPCs.
	c.conn.Close()
}
|
||||
|
||||
func (c *Client) fullTableName(table string) string {
|
||||
return fmt.Sprintf("projects/%s/zones/%s/clusters/%s/tables/%s", c.project, c.zone, c.cluster, table)
|
||||
}
|
||||
|
||||
// A Table refers to a table.
type Table struct {
	// c is the client used to issue RPCs for this table.
	c *Client
	// table is the table's short (unqualified) name.
	table string
}
|
||||
|
||||
// Open opens a table.
|
||||
func (c *Client) Open(table string) *Table {
|
||||
return &Table{
|
||||
c: c,
|
||||
table: table,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Read method that returns a sequence of ReadItems.
|
||||
|
||||
// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowRange, f func(Row) bool, opts ...ReadOption) error {
	req := &btspb.ReadRowsRequest{
		TableName: t.c.fullTableName(t.table),
		Target:    &btspb.ReadRowsRequest_RowRange{arg.proto()},
	}
	// Each ReadOption mutates the request before it is sent.
	for _, opt := range opts {
		opt.set(req)
	}
	ctx, cancel := context.WithCancel(ctx) // for aborting the stream
	stream, err := t.c.client.ReadRows(ctx, req)
	if err != nil {
		return err
	}
	// cr reassembles rows from the chunked stream responses.
	cr := new(chunkReader)
	for {
		res, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// process returns a non-nil Row only when a row is complete.
		if row := cr.process(res); row != nil {
			if !f(row) {
				// Cancel and drain stream.
				// The error from Recv here is the expected result of the
				// cancellation, so it is deliberately not returned.
				cancel()
				for {
					if _, err := stream.Recv(); err != nil {
						return nil
					}
				}
			}
		}
	}
	return nil
}
|
||||
|
||||
// ReadRow is a convenience implementation of a single-row reader.
|
||||
// A missing row will return a zero-length map and a nil error.
|
||||
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
|
||||
var r Row
|
||||
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
|
||||
r = rr
|
||||
return true
|
||||
}, opts...)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// chunkReader reassembles rows from a stream of ReadRowsResponses, each of
// which may carry only part of a row.
type chunkReader struct {
	// partial maps row key to the row being accumulated across responses.
	partial map[string]Row // incomplete rows
}
|
||||
|
||||
// process handles a single btspb.ReadRowsResponse.
// If it completes a row, that row is returned.
func (cr *chunkReader) process(rrr *btspb.ReadRowsResponse) Row {
	// Lazily initialize the partial-row map on first use.
	if cr.partial == nil {
		cr.partial = make(map[string]Row)
	}
	row := string(rrr.RowKey)
	r := cr.partial[row]
	if r == nil {
		r = make(Row)
		cr.partial[row] = r
	}
	for _, chunk := range rrr.Chunks {
		switch c := chunk.Chunk.(type) {
		case *btspb.ReadRowsResponse_Chunk_ResetRow:
			// Discard anything accumulated for this row and start over.
			r = make(Row)
			cr.partial[row] = r
			continue
		case *btspb.ReadRowsResponse_Chunk_CommitRow:
			delete(cr.partial, row)
			if len(r) == 0 {
				// Treat zero-content commits as absent.
				continue
			}
			// NOTE(review): returning here drops any chunks that follow a
			// commit in the same response — confirm the server never sends
			// such chunks.
			return r // assume that this is the last chunk
		case *btspb.ReadRowsResponse_Chunk_RowContents:
			// Merge this chunk's cell data into the partial row.
			decodeFamilyProto(r, row, c.RowContents)
		}
	}
	return nil
}
|
||||
|
||||
// decodeFamilyProto adds the cell data from f to the given row.
|
||||
func decodeFamilyProto(r Row, row string, f *btdpb.Family) {
|
||||
fam := f.Name // does not have colon
|
||||
for _, col := range f.Columns {
|
||||
for _, cell := range col.Cells {
|
||||
ri := ReadItem{
|
||||
Row: row,
|
||||
Column: fmt.Sprintf("%s:%s", fam, col.Qualifier),
|
||||
Timestamp: Timestamp(cell.TimestampMicros),
|
||||
Value: cell.Value,
|
||||
}
|
||||
r[fam] = append(r[fam], ri)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A RowRange is used to describe the rows to be read.
// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
	// start is the inclusive lower bound of the range.
	start string
	// limit is the exclusive upper bound; "" marks the range unbounded.
	limit string
}
|
||||
|
||||
// NewRange returns the new RowRange [begin, end).
|
||||
func NewRange(begin, end string) RowRange {
|
||||
return RowRange{
|
||||
start: begin,
|
||||
limit: end,
|
||||
}
|
||||
}
|
||||
|
||||
// Unbounded tests whether a RowRange is unbounded.
func (r RowRange) Unbounded() bool {
	// An empty limit is the sentinel for "no upper bound".
	return r.limit == ""
}
|
||||
|
||||
// Contains says whether the RowRange contains the key.
|
||||
func (r RowRange) Contains(row string) bool {
|
||||
return r.start <= row && (r.limit == "" || r.limit > row)
|
||||
}
|
||||
|
||||
// String provides a printable description of a RowRange.
|
||||
func (r RowRange) String() string {
|
||||
a := strconv.Quote(r.start)
|
||||
if r.Unbounded() {
|
||||
return fmt.Sprintf("[%s,∞)", a)
|
||||
}
|
||||
return fmt.Sprintf("[%s,%q)", a, r.limit)
|
||||
}
|
||||
|
||||
// proto converts the RowRange to its wire representation.
// An unbounded range omits the EndKey entirely.
func (r RowRange) proto() *btdpb.RowRange {
	if r.Unbounded() {
		return &btdpb.RowRange{StartKey: []byte(r.start)}
	}
	return &btdpb.RowRange{
		StartKey: []byte(r.start),
		EndKey:   []byte(r.limit),
	}
}
|
||||
|
||||
// SingleRow returns a RowRange for reading a single row.
|
||||
func SingleRow(row string) RowRange {
|
||||
return RowRange{
|
||||
start: row,
|
||||
limit: row + "\x00",
|
||||
}
|
||||
}
|
||||
|
||||
// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
|
||||
func PrefixRange(prefix string) RowRange {
|
||||
return RowRange{
|
||||
start: prefix,
|
||||
limit: prefixSuccessor(prefix),
|
||||
}
|
||||
}
|
||||
|
||||
// InfiniteRange returns the RowRange consisting of all keys at least as
|
||||
// large as start.
|
||||
func InfiniteRange(start string) RowRange {
|
||||
return RowRange{
|
||||
start: start,
|
||||
limit: "",
|
||||
}
|
||||
}
|
||||
|
||||
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
	if prefix == "" {
		return "" // infinite range
	}
	// Walk backwards past any trailing 0xff bytes; they cannot be incremented.
	i := len(prefix) - 1
	for i >= 0 && prefix[i] == '\xff' {
		i--
	}
	if i < 0 {
		// Every byte was 0xff: no finite successor exists.
		return ""
	}
	// Drop the 0xff tail and bump the last incrementable byte.
	b := []byte(prefix[:i+1])
	b[i]++
	return string(b)
}
|
||||
|
||||
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
	// set applies the option to the outgoing read request.
	set(req *btspb.ReadRowsRequest)
}

// RowFilter returns a ReadOption that applies f to the contents of read rows.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }

// rowFilter implements ReadOption by installing a row filter on the request.
type rowFilter struct{ f Filter }

func (rf rowFilter) set(req *btspb.ReadRowsRequest) { req.Filter = rf.f.proto() }

// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }

// limitRows implements ReadOption by capping how many rows the server returns.
type limitRows struct{ limit int64 }

func (lr limitRows) set(req *btspb.ReadRowsRequest) { req.NumRowsLimit = lr.limit }
|
||||
|
||||
// A Row is returned by ReadRow. The map is keyed by column family (the prefix
// of the column name before the colon). The values are the returned ReadItems
// for that column family in the order returned by Read.
type Row map[string][]ReadItem
|
||||
|
||||
// Key returns the row's key, or "" if the row is empty.
|
||||
func (r Row) Key() string {
|
||||
for _, items := range r {
|
||||
if len(items) > 0 {
|
||||
return items[0].Row
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
type ReadItem struct {
	// Row is the row key; Column is the full "family:qualifier" name.
	Row, Column string
	// Timestamp is the cell's version timestamp in microseconds.
	Timestamp Timestamp
	// Value is the raw cell contents.
	Value []byte
}
|
||||
|
||||
// Apply applies a Mutation to a specific row.
//
// An unconditional mutation is sent as a MutateRow RPC. A conditional one
// (built with NewCondMutation) is sent as a CheckAndMutateRow RPC. On a
// successful RPC, every ApplyOption's after hook receives the raw response.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	// after fans the RPC response out to all option callbacks.
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}

	if m.cond == nil {
		// Simple (unconditional) mutation path.
		req := &btspb.MutateRowRequest{
			TableName: t.c.fullTableName(t.table),
			RowKey:    []byte(row),
			Mutations: m.ops,
		}
		res, err := t.c.client.MutateRow(ctx, req)
		if err == nil {
			after(res)
		}
		return err
	}
	// Conditional mutation: the server applies TrueMutations or
	// FalseMutations depending on whether the predicate filter matches
	// any cell in the row.
	req := &btspb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		req.FalseMutations = m.mfalse.ops
	}
	res, err := t.c.client.CheckAndMutateRow(ctx, req)
	if err == nil {
		after(res)
	}
	return err
}
|
||||
|
||||
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
	// after is invoked with the RPC response after a successful Apply.
	after(res proto.Message)
}

// applyAfterFunc adapts a plain function to the ApplyOption interface.
type applyAfterFunc func(res proto.Message)

func (a applyAfterFunc) after(res proto.Message) { a(res) }

// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
	return applyAfterFunc(func(res proto.Message) {
		// Only conditional mutations yield a CheckAndMutateRowResponse;
		// for any other response type *matched is left untouched.
		if res, ok := res.(*btspb.CheckAndMutateRowResponse); ok {
			*matched = res.PredicateMatched
		}
	})
}
|
||||
|
||||
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
	ops []*btdpb.Mutation // accumulated operations, applied in order

	// for conditional mutations
	cond          Filter    // predicate filter; nil means unconditional
	mtrue, mfalse *Mutation // applied when cond matches / does not match
}

// NewMutation returns a new mutation.
func NewMutation() *Mutation {
	return new(Mutation)
}

// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
	return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}
|
||||
|
||||
// Set sets a value in a specified column, with the given timestamp.
|
||||
// The timestamp will be truncated to millisecond resolution.
|
||||
// A timestamp of ServerTime means to use the server timestamp.
|
||||
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
|
||||
if ts != ServerTime {
|
||||
// Truncate to millisecond resolution, since that's the default table config.
|
||||
// TODO(dsymonds): Provide a way to override this behaviour.
|
||||
ts -= ts % 1000
|
||||
}
|
||||
m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_SetCell_{&btdpb.Mutation_SetCell{
|
||||
FamilyName: family,
|
||||
ColumnQualifier: []byte(column),
|
||||
TimestampMicros: int64(ts),
|
||||
Value: value,
|
||||
}}})
|
||||
}
|
||||
|
||||
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
	// No TimeRange set: all versions of the cell are deleted.
	m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
	}}})
}
|
||||
|
||||
// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
	m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromColumn_{&btdpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimeRange: &btdpb.TimestampRange{
			StartTimestampMicros: int64(start),
			EndTimestampMicros:   int64(end),
		},
	}}})
}
|
||||
|
||||
// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
	m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromFamily_{&btdpb.Mutation_DeleteFromFamily{
		FamilyName: family,
	}}})
}
|
||||
|
||||
// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
	m.ops = append(m.ops, &btdpb.Mutation{Mutation: &btdpb.Mutation_DeleteFromRow_{&btdpb.Mutation_DeleteFromRow{}}})
}
|
||||
|
||||
// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64

// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1
|
||||
|
||||
// Time converts a time.Time into a Timestamp.
|
||||
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
|
||||
|
||||
// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }
|
||||
|
||||
// Time converts a Timestamp into a time.Time.
|
||||
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
|
||||
|
||||
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
	req := &btspb.ReadModifyWriteRowRequest{
		TableName: t.c.fullTableName(t.table),
		RowKey:    []byte(row),
		Rules:     m.ops,
	}
	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
	if err != nil {
		return nil, err
	}
	// Convert the returned proto row into the client-side Row representation.
	r := make(Row)
	for _, fam := range res.Families { // res is *btdpb.Row, fam is *btdpb.Family
		decodeFamilyProto(r, row, fam)
	}
	return r, nil
}
|
||||
|
||||
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
	ops []*btdpb.ReadModifyWriteRule // rules applied in order by the server
}

// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
|
||||
|
||||
// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
	m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btdpb.ReadModifyWriteRule_AppendValue{v},
	})
}
|
||||
|
||||
// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
	m.ops = append(m.ops, &btdpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btdpb.ReadModifyWriteRule_IncrementAmount{delta},
	})
}
|
||||
606
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go
generated
vendored
Normal file
606
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bigtable_test.go
generated
vendored
Normal file
@@ -0,0 +1,606 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud"
|
||||
"google.golang.org/cloud/bigtable/bttest"
|
||||
btspb "google.golang.org/cloud/bigtable/internal/service_proto"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// dataChunk builds the text-proto fragment for a single-cell row_contents
// chunk of a ReadRowsResponse, used to assemble test inputs.
func dataChunk(fam, col string, ts int64, data string) string {
	const tmpl = "chunks:<row_contents:<name:%q columns:<qualifier:%q cells:<timestamp_micros:%d value:%q>>>>"
	return fmt.Sprintf(tmpl, fam, col, ts, data)
}
|
||||
|
||||
// commit returns the text-proto chunk that commits the current row.
func commit() string { return "chunks:<commit_row:true>" }

// reset returns the text-proto chunk that discards the row's buffered data.
func reset() string { return "chunks:<reset_row:true>" }
|
||||
|
||||
// chunkTests drives TestChunkReader: each case is a sequence of
// ReadRowsResponse text protos and the fully-assembled rows expected
// after processing them all.
var chunkTests = []struct {
	desc   string
	chunks []string // sequence of ReadRowsResponse protos in text format
	want   map[string]Row
}{
	{
		desc: "single row single chunk",
		chunks: []string{
			`row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data") + commit(),
		},
		want: map[string]Row{
			"row1": Row{
				"fam": []ReadItem{{
					Row:       "row1",
					Column:    "fam:col1",
					Timestamp: 1428382701000000,
					Value:     []byte("data"),
				}},
			},
		},
	},
	{
		desc: "single row multiple chunks",
		chunks: []string{
			`row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"),
			`row_key: "row1" ` + dataChunk("fam", "col2", 1428382702000000, "more data"),
			`row_key: "row1" ` + commit(),
		},
		want: map[string]Row{
			"row1": Row{
				"fam": []ReadItem{
					{
						Row:       "row1",
						Column:    "fam:col1",
						Timestamp: 1428382701000000,
						Value:     []byte("data"),
					},
					{
						Row:       "row1",
						Column:    "fam:col2",
						Timestamp: 1428382702000000,
						Value:     []byte("more data"),
					},
				},
			},
		},
	},
	{
		// A reset discards everything buffered for the row so far;
		// only data arriving after the reset survives the commit.
		desc: "chunk, reset, chunk, commit",
		chunks: []string{
			`row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"),
			`row_key: "row1" ` + reset(),
			`row_key: "row1" ` + dataChunk("fam", "col1", 1428382702000000, "data") + commit(),
		},
		want: map[string]Row{
			"row1": Row{
				"fam": []ReadItem{{
					Row:       "row1",
					Column:    "fam:col1",
					Timestamp: 1428382702000000,
					Value:     []byte("data"),
				}},
			},
		},
	},
	{
		// Reset then commit with no data: no row should be produced.
		desc: "chunk, reset, commit",
		chunks: []string{
			`row_key: "row1" ` + dataChunk("fam", "col1", 1428382701000000, "data"),
			`row_key: "row1" ` + reset(),
			`row_key: "row1" ` + commit(),
		},
		want: map[string]Row{},
	},
	// TODO(dsymonds): More test cases, including
	// - multiple rows
}
|
||||
|
||||
// TestChunkReader feeds each chunkTests sequence through a chunkReader
// and checks the assembled rows against the expected map.
func TestChunkReader(t *testing.T) {
	for _, tc := range chunkTests {
		cr := new(chunkReader)
		got := make(map[string]Row) // filled as rows are committed
		for i, txt := range tc.chunks {
			rrr := new(btspb.ReadRowsResponse)
			if err := proto.UnmarshalText(txt, rrr); err != nil {
				t.Fatalf("%s: internal error: bad #%d test text: %v", tc.desc, i, err)
			}
			// process returns a non-nil Row only when a row commits.
			if row := cr.process(rrr); row != nil {
				got[row.Key()] = row
			}
		}
		// TODO(dsymonds): check for partial rows?
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%s: processed response mismatch.\n got %+v\nwant %+v", tc.desc, got, tc.want)
		}
	}
}
|
||||
|
||||
func TestPrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
prefix, succ string
|
||||
}{
|
||||
{"", ""},
|
||||
{"\xff", ""}, // when used, "" means Infinity
|
||||
{"x\xff", "y"},
|
||||
{"\xfe", "\xff"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
got := prefixSuccessor(tc.prefix)
|
||||
if got != tc.succ {
|
||||
t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ)
|
||||
continue
|
||||
}
|
||||
r := PrefixRange(tc.prefix)
|
||||
if tc.succ == "" && r.limit != "" {
|
||||
t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit)
|
||||
}
|
||||
if tc.succ != "" && r.limit != tc.succ {
|
||||
t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// useProd selects the integration-test backend: empty runs against the
// in-memory bttest server; otherwise it names a production cluster.
var useProd = flag.String("use_prod", "", `if set to "proj,zone,cluster,table", run integration test against production`)
|
||||
|
||||
// TestClientIntegration exercises the full client surface end to end:
// table/family admin, mutations (plain and conditional), reads with every
// RowRange constructor and filter, ReadModifyWrite, multi-version cells,
// concurrency, and large values. By default it runs against the in-memory
// bttest server; with -use_prod it targets a real cluster.
// The steps are order-dependent: later sections read data written earlier.
func TestClientIntegration(t *testing.T) {
	start := time.Now()
	lastCheckpoint := start
	// checkpoint logs elapsed time per test phase for coarse profiling.
	checkpoint := func(s string) {
		n := time.Now()
		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
		lastCheckpoint = n
	}

	proj, zone, cluster, table := "proj", "zone", "cluster", "mytable"
	var clientOpts []cloud.ClientOption
	timeout := 10 * time.Second
	if *useProd == "" {
		// Local mode: spin up the in-memory fake and dial it insecurely.
		srv, err := bttest.NewServer()
		if err != nil {
			t.Fatal(err)
		}
		defer srv.Close()
		t.Logf("bttest.Server running on %s", srv.Addr)
		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
		if err != nil {
			t.Fatalf("grpc.Dial: %v", err)
		}
		clientOpts = []cloud.ClientOption{cloud.WithBaseGRPC(conn)}
	} else {
		t.Logf("Running test against production")
		a := strings.Split(*useProd, ",")
		proj, zone, cluster, table = a[0], a[1], a[2], a[3]
		timeout = 5 * time.Minute
	}

	// NOTE(review): the CancelFunc is discarded; the context is only
	// released when the timeout fires. Acceptable in a test, but `go vet`
	// would flag it — confirm before copying this pattern elsewhere.
	ctx, _ := context.WithTimeout(context.Background(), timeout)

	client, err := NewClient(ctx, proj, zone, cluster, clientOpts...)
	if err != nil {
		t.Fatalf("NewClient: %v", err)
	}
	defer client.Close()
	checkpoint("dialed Client")

	adminClient, err := NewAdminClient(ctx, proj, zone, cluster, clientOpts...)
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()
	checkpoint("dialed AdminClient")

	// Delete the table at the end of the test.
	// Do this even before creating the table so that if this is running
	// against production and CreateTable fails there's a chance of cleaning it up.
	defer adminClient.DeleteTable(ctx, table)

	if err := adminClient.CreateTable(ctx, table); err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	checkpoint("created table")
	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	checkpoint(`created "follows" column family`)

	tbl := client.Open(table)

	// Insert some data.
	initialData := map[string][]string{
		"wmckinley":   []string{"tjefferson"},
		"gwashington": []string{"jadams"},
		"tjefferson":  []string{"gwashington", "jadams"}, // wmckinley set conditionally below
		"jadams":      []string{"gwashington", "tjefferson"},
	}
	for row, ss := range initialData {
		mut := NewMutation()
		for _, name := range ss {
			mut.Set("follows", name, 0, []byte("1"))
		}
		if err := tbl.Apply(ctx, row, mut); err != nil {
			t.Errorf("Mutating row %q: %v", row, err)
		}
	}
	checkpoint("inserted initial data")

	// Do a conditional mutation with a complex filter.
	mutTrue := NewMutation()
	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
	filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
	mut := NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	// Do a second condition mutation with a filter that does not match,
	// and thus no changes should be made.
	mutTrue = NewMutation()
	mutTrue.DeleteRow()
	filter = ColumnFilter("snoop.dogg")
	mut = NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	checkpoint("did two conditional mutations")

	// Fetch a row.
	row, err := tbl.ReadRow(ctx, "jadams")
	if err != nil {
		t.Fatalf("Reading a row: %v", err)
	}
	wantRow := Row{
		"follows": []ReadItem{
			{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
			{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
		},
	}
	// Sort cells by column so the comparison is deterministic.
	for _, ris := range row {
		sort.Sort(byColumn(ris))
	}
	if !reflect.DeepEqual(row, wantRow) {
		t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
	}
	checkpoint("tested ReadRow")

	// Do a bunch of reads with filters.
	readTests := []struct {
		desc   string
		rr     RowRange
		filter Filter // may be nil

		// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
		// sort that list, and join with a comma.
		want string
	}{
		{
			desc: "read all, unfiltered",
			rr:   RowRange{},
			want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with InfiniteRange, unfiltered",
			rr:   InfiniteRange("tjefferson"),
			want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with NewRange, unfiltered",
			rr:   NewRange("gargamel", "hubbard"),
			want: "gwashington-jadams-1",
		},
		{
			desc: "read with PrefixRange, unfiltered",
			rr:   PrefixRange("jad"),
			want: "jadams-gwashington-1,jadams-tjefferson-1",
		},
		{
			desc: "read with SingleRow, unfiltered",
			rr:   SingleRow("wmckinley"),
			want: "wmckinley-tjefferson-1",
		},
		{
			desc:   "read all, with ColumnFilter",
			rr:     RowRange{},
			filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
			want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
		},
	}
	for _, tc := range readTests {
		var opts []ReadOption
		if tc.filter != nil {
			opts = append(opts, RowFilter(tc.filter))
		}
		var elt []string
		err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
			for _, ris := range r {
				for _, ri := range ris {
					// Use the column qualifier only to make the test data briefer.
					col := ri.Column[strings.Index(ri.Column, ":")+1:]
					x := fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
					elt = append(elt, x)
				}
			}
			return true
		}, opts...)
		if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		sort.Strings(elt)
		if got := strings.Join(elt, ","); got != tc.want {
			t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
		}
	}
	checkpoint("tested ReadRows in a few ways")

	// Do a scan and stop part way through.
	// Verify that the ReadRows callback doesn't keep running.
	stopped := false
	err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
		if r.Key() < "h" {
			return true
		}
		if !stopped {
			stopped = true
			return false
		}
		t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key())
		return false
	})
	if err != nil {
		t.Errorf("Partial ReadRows: %v", err)
	}
	checkpoint("did partial ReadRows test")

	// Delete a row and check it goes away.
	mut = NewMutation()
	mut.DeleteRow()
	if err := tbl.Apply(ctx, "wmckinley", mut); err != nil {
		t.Errorf("Apply DeleteRow: %v", err)
	}
	row, err = tbl.ReadRow(ctx, "wmckinley")
	if err != nil {
		t.Fatalf("Reading a row after DeleteRow: %v", err)
	}
	if len(row) != 0 {
		t.Fatalf("Read non-zero row after DeleteRow: %v", row)
	}
	checkpoint("exercised DeleteRow")

	// Check ReadModifyWrite.

	if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}

	appendRMW := func(b []byte) *ReadModifyWrite {
		rmw := NewReadModifyWrite()
		rmw.AppendValue("counter", "likes", b)
		return rmw
	}
	incRMW := func(n int64) *ReadModifyWrite {
		rmw := NewReadModifyWrite()
		rmw.Increment("counter", "likes", n)
		return rmw
	}
	// Each step builds on the previous cell value: two appends assemble a
	// big-endian 17, then an increment makes it 25.
	rmwSeq := []struct {
		desc string
		rmw  *ReadModifyWrite
		want []byte
	}{
		{
			desc: "append #1",
			rmw:  appendRMW([]byte{0, 0, 0}),
			want: []byte{0, 0, 0},
		},
		{
			desc: "append #2",
			rmw:  appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17
			want: []byte{0, 0, 0, 0, 0, 0, 0, 17},
		},
		{
			desc: "increment",
			rmw:  incRMW(8),
			want: []byte{0, 0, 0, 0, 0, 0, 0, 25},
		},
	}
	for _, step := range rmwSeq {
		row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw)
		if err != nil {
			t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
		}
		// Timestamps are server-assigned; zero them before comparing.
		clearTimestamps(row)
		wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
		if !reflect.DeepEqual(row, wantRow) {
			t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
		}
	}
	checkpoint("tested ReadModifyWrite")

	// Test arbitrary timestamps more thoroughly.
	if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	const numVersions = 4
	mut = NewMutation()
	for i := 0; i < numVersions; i++ {
		// Timestamps are used in thousands because the server
		// only permits that granularity.
		mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
	}
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err := tbl.ReadRow(ctx, "testrow")
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		// These should be returned in descending timestamp order.
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
		{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
	}
	// Do the same read, but filter to the latest two versions.
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
	}
	// Delete the cell with timestamp 2000 and repeat the last read,
	// checking that we get ts 3000 and ts 1000.
	mut = NewMutation()
	mut.DeleteTimestampRange("ts", "col", 2000, 3000) // half-open interval
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
	}
	checkpoint("tested multiple versions in a cell")

	// Do highly concurrent reads/writes.
	// TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved.
	const maxConcurrency = 100
	var wg sync.WaitGroup
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// ~30% reads, ~70% writes, chosen at random per goroutine.
			switch r := rand.Intn(100); { // r ∈ [0,100)
			case 0 <= r && r < 30:
				// Do a read.
				_, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1)))
				if err != nil {
					t.Errorf("Concurrent read: %v", err)
				}
			case 30 <= r && r < 100:
				// Do a write.
				mut := NewMutation()
				mut.Set("ts", "col", 0, []byte("data"))
				if err := tbl.Apply(ctx, "testrow", mut); err != nil {
					t.Errorf("Concurrent write: %v", err)
				}
			}
		}()
	}
	wg.Wait()
	checkpoint("tested high concurrency")

	// Large reads, writes and scans.
	bigBytes := make([]byte, 15<<20) // 15 MB is large
	nonsense := []byte("lorem ipsum dolor sit amet, ")
	fill(bigBytes, nonsense)
	mut = NewMutation()
	mut.Set("ts", "col", 0, bigBytes)
	if err := tbl.Apply(ctx, "bigrow", mut); err != nil {
		t.Errorf("Big write: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "bigrow")
	if err != nil {
		t.Errorf("Big read: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "bigrow", Column: "ts:col", Value: bigBytes},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Big read returned incorrect bytes: %v", r)
	}
	// Now write 1000 rows, each with 82 KB values, then scan them all.
	medBytes := make([]byte, 82<<10)
	fill(medBytes, nonsense)
	sem := make(chan int, 50) // do up to 50 mutations at a time.
	for i := 0; i < 1000; i++ {
		mut := NewMutation()
		mut.Set("ts", "big-scan", 0, medBytes)
		row := fmt.Sprintf("row-%d", i)
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			sem <- 1
			if err := tbl.Apply(ctx, row, mut); err != nil {
				t.Errorf("Preparing large scan: %v", err)
			}
		}()
	}
	wg.Wait()
	// Scan everything back and total the bytes read.
	n := 0
	err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
		for _, ris := range r {
			for _, ri := range ris {
				n += len(ri.Value)
			}
		}
		return true
	}, RowFilter(ColumnFilter("big-scan")))
	if err != nil {
		t.Errorf("Doing large scan: %v", err)
	}
	if want := 1000 * len(medBytes); n != want {
		t.Errorf("Large scan returned %d bytes, want %d", n, want)
	}
	checkpoint("tested big read/write/scan")
}
|
||||
|
||||
// fill overwrites all of b by repeating the pattern sub; the final
// repetition is truncated if len(b) is not a multiple of len(sub).
// It is a no-op if sub is empty.
func fill(b, sub []byte) {
	if len(sub) == 0 {
		return // avoid an infinite loop on an empty pattern
	}
	// Loop while any of b remains: copy itself is bounded by the shorter
	// slice, so the tail is filled too. The original condition
	// (len(b) > len(sub)) left the last <= len(sub) bytes untouched.
	for len(b) > 0 {
		n := copy(b, sub)
		b = b[n:]
	}
}
|
||||
|
||||
// byColumn implements sort.Interface, ordering ReadItems lexically by
// column name so rows can be compared deterministically in tests.
type byColumn []ReadItem

func (b byColumn) Len() int           { return len(b) }
func (b byColumn) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
|
||||
|
||||
// clearTimestamps zeroes every cell's Timestamp in r, so rows can be
// compared without regard to server-assigned write times.
func clearTimestamps(r Row) {
	for _, ris := range r {
		for i := range ris {
			ris[i].Timestamp = 0
		}
	}
}
|
||||
839
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go
generated
vendored
Normal file
839
Godeps/_workspace/src/google.golang.org/cloud/bigtable/bttest/inmem.go
generated
vendored
Normal file
@@ -0,0 +1,839 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package bttest contains test helpers for working with the bigtable package.
|
||||
|
||||
To use a Server, create it, and then connect to it with no security:
|
||||
(The project/zone/cluster values are ignored.)
|
||||
srv, err := bttest.NewServer()
|
||||
...
|
||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
|
||||
...
|
||||
client, err := bigtable.NewClient(ctx, proj, zone, cluster,
|
||||
bigtable.WithBaseGRPC(conn))
|
||||
...
|
||||
*/
|
||||
package bttest // import "google.golang.org/cloud/bigtable/bttest"
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
btdpb "google.golang.org/cloud/bigtable/internal/data_proto"
|
||||
emptypb "google.golang.org/cloud/bigtable/internal/empty"
|
||||
btspb "google.golang.org/cloud/bigtable/internal/service_proto"
|
||||
bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto"
|
||||
bttspb "google.golang.org/cloud/bigtable/internal/table_service_proto"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Server is an in-memory Cloud Bigtable fake.
// It is unauthenticated, and only a rough approximation.
type Server struct {
	Addr string // address the fake listens on, e.g. "127.0.0.1:1234"

	l   net.Listener
	srv *grpc.Server
	s   *server // the actual service implementation (see below)
}
|
||||
|
||||
// server is the real implementation of the fake.
// It is a separate and unexported type so the API won't be cluttered with
// methods that are only relevant to the fake's implementation.
type server struct {
	mu     sync.Mutex
	tables map[string]*table // keyed by fully qualified name
	gcc    chan int          // set when gcloop starts, closed when server shuts down

	// Any unimplemented methods will cause a panic.
	bttspb.BigtableTableServiceServer
	btspb.BigtableServiceServer
}
|
||||
|
||||
// NewServer creates a new Server. The Server will be listening for gRPC connections
|
||||
// at the address named by the Addr field, without TLS.
|
||||
func NewServer() (*Server, error) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
Addr: l.Addr().String(),
|
||||
l: l,
|
||||
srv: grpc.NewServer(),
|
||||
s: &server{
|
||||
tables: make(map[string]*table),
|
||||
},
|
||||
}
|
||||
bttspb.RegisterBigtableTableServiceServer(s.srv, s.s)
|
||||
btspb.RegisterBigtableServiceServer(s.srv, s.s)
|
||||
|
||||
go s.srv.Serve(s.l)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close shuts down the server.
|
||||
func (s *Server) Close() {
|
||||
s.s.mu.Lock()
|
||||
if s.s.gcc != nil {
|
||||
close(s.s.gcc)
|
||||
}
|
||||
s.s.mu.Unlock()
|
||||
|
||||
s.srv.Stop()
|
||||
s.l.Close()
|
||||
}
|
||||
|
||||
func (s *server) CreateTable(ctx context.Context, req *bttspb.CreateTableRequest) (*bttdpb.Table, error) {
|
||||
tbl := req.Name + "/tables/" + req.TableId
|
||||
|
||||
s.mu.Lock()
|
||||
if _, ok := s.tables[tbl]; ok {
|
||||
s.mu.Unlock()
|
||||
return nil, fmt.Errorf("table %q already exists", tbl)
|
||||
}
|
||||
s.tables[tbl] = newTable()
|
||||
s.mu.Unlock()
|
||||
|
||||
return &bttdpb.Table{Name: tbl}, nil
|
||||
}
|
||||
|
||||
func (s *server) ListTables(ctx context.Context, req *bttspb.ListTablesRequest) (*bttspb.ListTablesResponse, error) {
|
||||
res := &bttspb.ListTablesResponse{}
|
||||
prefix := req.Name + "/tables/"
|
||||
|
||||
s.mu.Lock()
|
||||
for tbl := range s.tables {
|
||||
if strings.HasPrefix(tbl, prefix) {
|
||||
res.Tables = append(res.Tables, &bttdpb.Table{Name: tbl})
|
||||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (s *server) GetTable(ctx context.Context, req *bttspb.GetTableRequest) (*bttdpb.Table, error) {
|
||||
tbl := req.Name
|
||||
|
||||
s.mu.Lock()
|
||||
tblIns, ok := s.tables[tbl]
|
||||
s.mu.Unlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("table %q not found", tbl)
|
||||
}
|
||||
|
||||
return &bttdpb.Table{
|
||||
Name: tbl,
|
||||
ColumnFamilies: toColumnFamilies(tblIns.families),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *server) DeleteTable(ctx context.Context, req *bttspb.DeleteTableRequest) (*emptypb.Empty, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if _, ok := s.tables[req.Name]; !ok {
|
||||
return nil, fmt.Errorf("no such table %q", req.Name)
|
||||
}
|
||||
delete(s.tables, req.Name)
|
||||
return &emptypb.Empty{}, nil
|
||||
}
|
||||
|
||||
// CreateColumnFamily adds a new column family (with no GC rule) to an
// existing table. It fails if the table is unknown or the family already
// exists.
func (s *server) CreateColumnFamily(ctx context.Context, req *bttspb.CreateColumnFamilyRequest) (*bttdpb.ColumnFamily, error) {
	s.mu.Lock()
	tbl, ok := s.tables[req.Name]
	s.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no such table %q", req.Name)
	}

	// Check it is unique and record it.
	fam := req.ColumnFamilyId
	tbl.mu.Lock()
	defer tbl.mu.Unlock()
	if _, ok := tbl.families[fam]; ok {
		return nil, fmt.Errorf("family %q already exists", fam)
	}
	newcf := &columnFamily{
		name: req.Name + "/columnFamilies/" + fam,
	}
	tbl.families[fam] = newcf
	return newcf.proto(), nil
}
|
||||
|
||||
// UpdateColumnFamily replaces an existing family's settings (currently
// just the GC rule) wholesale. The family is identified by its fully
// qualified name, ".../tables/<tbl>/columnFamilies/<fam>".
func (s *server) UpdateColumnFamily(ctx context.Context, req *bttdpb.ColumnFamily) (*bttdpb.ColumnFamily, error) {
	index := strings.Index(req.Name, "/columnFamilies/")
	if index == -1 {
		return nil, fmt.Errorf("bad family name %q", req.Name)
	}
	tblName := req.Name[:index]
	fam := req.Name[index+len("/columnFamilies/"):]

	s.mu.Lock()
	tbl, ok := s.tables[tblName]
	s.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no such table %q", req.Name)
	}

	tbl.mu.Lock()
	defer tbl.mu.Unlock()

	// This is an update, not an upsert: the family must already exist.
	if _, ok := tbl.families[fam]; !ok {
		return nil, fmt.Errorf("no such family %q", fam)
	}

	newcf := &columnFamily{
		name:   req.Name,
		gcRule: req.GcRule,
	}
	// assume that we ALWAYS want to replace by the new setting
	// we may need partial update though
	tbl.families[fam] = newcf
	// A GC rule may now be in effect, so ensure the GC loop is running.
	s.needGC()
	return newcf.proto(), nil
}
|
||||
|
||||
// ReadRows streams back the rows matching the request's target — either a
// single row key or a half-open [start, end) row range — applying
// req.Filter to each row before sending it.
func (s *server) ReadRows(req *btspb.ReadRowsRequest, stream btspb.BigtableService_ReadRowsServer) error {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return fmt.Errorf("no such table %q", req.TableName)
	}

	var start, end string // half-open interval
	switch targ := req.Target.(type) {
	case *btspb.ReadRowsRequest_RowRange:
		start, end = string(targ.RowRange.StartKey), string(targ.RowRange.EndKey)
	case *btspb.ReadRowsRequest_RowKey:
		// A single row read is simply an edge case.
		start = string(targ.RowKey)
		end = start + "\x00" // smallest key strictly greater than start
	default:
		return fmt.Errorf("unknown ReadRowsRequest.Target oneof %T", targ)
	}

	// Get rows to stream back.
	// tbl.rows is kept sorted by key, so binary search finds the interval.
	tbl.mu.RLock()
	si, ei := 0, len(tbl.rows) // half-open interval
	if start != "" {
		si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
	}
	if end != "" {
		ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
	}
	if si >= ei {
		tbl.mu.RUnlock()
		return nil
	}
	// Copy the row pointers so the table lock isn't held while streaming.
	rows := make([]*row, ei-si)
	copy(rows, tbl.rows[si:ei])
	tbl.mu.RUnlock()

	for _, r := range rows {
		if err := streamRow(stream, r, req.Filter); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// streamRow sends one row to the client as a sequence of per-column chunks
// followed by a commit chunk. The row is copied and filtered first, so the
// stored row is never mutated by the filter.
func streamRow(stream btspb.BigtableService_ReadRowsServer, r *row, f *btdpb.RowFilter) error {
	r.mu.Lock()
	nr := r.copy() // filter a copy so the stored row is untouched
	r.mu.Unlock()
	r = nr

	filterRow(f, r)

	rrr := &btspb.ReadRowsResponse{
		RowKey: []byte(r.key),
	}
	for col, cells := range r.cells {
		i := strings.Index(col, ":") // guaranteed to exist
		fam, col := col[:i], col[i+1:]
		if len(cells) == 0 {
			continue
		}
		// TODO(dsymonds): Apply transformers.
		colm := &btdpb.Column{
			Qualifier: []byte(col),
			// Cells is populated below.
		}
		for _, cell := range cells {
			colm.Cells = append(colm.Cells, &btdpb.Cell{
				TimestampMicros: cell.ts,
				Value:           cell.value,
			})
		}
		// Each column goes out as its own chunk, wrapped in a
		// single-column Family message.
		rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{
			Chunk: &btspb.ReadRowsResponse_Chunk_RowContents{&btdpb.Family{
				Name:    fam,
				Columns: []*btdpb.Column{colm},
			}},
		})
	}
	// The commit chunk marks the end of this row's data.
	rrr.Chunks = append(rrr.Chunks, &btspb.ReadRowsResponse_Chunk{Chunk: &btspb.ReadRowsResponse_Chunk_CommitRow{true}})
	return stream.Send(rrr)
}
|
||||
|
||||
// filterRow modifies a row with the given filter.
// Composite filters (chain, interleave) and the per-column cell limit are
// handled here; every other filter type is applied cell-by-cell via
// filterCells. A nil filter is a no-op.
func filterRow(f *btdpb.RowFilter, r *row) {
	if f == nil {
		return
	}
	// Handle filters that apply beyond just including/excluding cells.
	switch f := f.Filter.(type) {
	case *btdpb.RowFilter_Chain_:
		// Chain: apply each sub-filter in sequence to the same row.
		for _, sub := range f.Chain.Filters {
			filterRow(sub, r)
		}
		return
	case *btdpb.RowFilter_Interleave_:
		// Interleave: apply each sub-filter to its own copy of the row,
		// then merge all surviving cells back together.
		srs := make([]*row, 0, len(f.Interleave.Filters))
		for _, sub := range f.Interleave.Filters {
			sr := r.copy()
			filterRow(sub, sr)
			srs = append(srs, sr)
		}
		// merge
		// TODO(dsymonds): is this correct?
		r.cells = make(map[string][]cell)
		for _, sr := range srs {
			for col, cs := range sr.cells {
				r.cells[col] = append(r.cells[col], cs...)
			}
		}
		// Restore the invariant that cells are newest-first.
		for _, cs := range r.cells {
			sort.Sort(byDescTS(cs))
		}
		return
	case *btdpb.RowFilter_CellsPerColumnLimitFilter:
		// Keep only the newest N cells per column (cells are sorted
		// in descending timestamp order, so a prefix suffices).
		lim := int(f.CellsPerColumnLimitFilter)
		for col, cs := range r.cells {
			if len(cs) > lim {
				r.cells[col] = cs[:lim]
			}
		}
		return
	}

	// Any other case, operate on a per-cell basis.
	for key, cs := range r.cells {
		i := strings.Index(key, ":") // guaranteed to exist
		fam, col := key[:i], key[i+1:]
		r.cells[key] = filterCells(f, fam, col, cs)
	}
}
|
||||
|
||||
func filterCells(f *btdpb.RowFilter, fam, col string, cs []cell) []cell {
|
||||
var ret []cell
|
||||
for _, cell := range cs {
|
||||
if includeCell(f, fam, col, cell) {
|
||||
ret = append(ret, cell)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// includeCell reports whether a single cell passes the filter f.
// Unrecognised filter types are treated as always-true (with a logged
// warning); a malformed regex rejects the cell.
// NOTE(review): the regex is recompiled for every cell — fine for a test
// fake, but a candidate for caching if it ever shows up in profiles.
func includeCell(f *btdpb.RowFilter, fam, col string, cell cell) bool {
	if f == nil {
		return true
	}
	// TODO(dsymonds): Implement many more filters.
	switch f := f.Filter.(type) {
	default:
		log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f)
		return true
	case *btdpb.RowFilter_ColumnQualifierRegexFilter:
		// Match against the column qualifier (the part after "fam:").
		pat := string(f.ColumnQualifierRegexFilter)
		rx, err := regexp.Compile(pat)
		if err != nil {
			log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err)
			return false
		}
		return rx.MatchString(col)
	case *btdpb.RowFilter_ValueRegexFilter:
		// Match against the raw cell value bytes.
		pat := string(f.ValueRegexFilter)
		rx, err := regexp.Compile(pat)
		if err != nil {
			log.Printf("Bad value_regex_filter pattern %q: %v", pat, err)
			return false
		}
		return rx.Match(cell.value)
	}
}
|
||||
|
||||
// MutateRow applies a batch of mutations to a single row atomically
// (under the row's lock). The row is created if it doesn't exist.
func (s *server) MutateRow(ctx context.Context, req *btspb.MutateRowRequest) (*emptypb.Empty, error) {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no such table %q", req.TableName)
	}

	r := tbl.mutableRow(string(req.RowKey))
	r.mu.Lock()
	defer r.mu.Unlock()

	if err := applyMutations(tbl, r, req.Mutations); err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, nil
}
|
||||
|
||||
// CheckAndMutateRow applies either TrueMutations or FalseMutations to a
// row, depending on whether the row — optionally filtered by
// PredicateFilter — contains any cells.
func (s *server) CheckAndMutateRow(ctx context.Context, req *btspb.CheckAndMutateRowRequest) (*btspb.CheckAndMutateRowResponse, error) {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no such table %q", req.TableName)
	}

	res := &btspb.CheckAndMutateRowResponse{}

	r := tbl.mutableRow(string(req.RowKey))
	r.mu.Lock()
	defer r.mu.Unlock()

	// Figure out which mutation to apply.
	whichMut := false
	if req.PredicateFilter == nil {
		// Use true_mutations iff row contains any cells.
		whichMut = len(r.cells) > 0
	} else {
		// Use true_mutations iff any cells in the row match the filter.
		// TODO(dsymonds): This could be cheaper.
		nr := r.copy() // filter a copy so the stored row is untouched
		filterRow(req.PredicateFilter, nr)
		for _, cs := range nr.cells {
			if len(cs) > 0 {
				whichMut = true
				break
			}
		}
		// TODO(dsymonds): Figure out if this is supposed to be set
		// even when there's no predicate filter.
		res.PredicateMatched = whichMut
	}
	muts := req.FalseMutations
	if whichMut {
		muts = req.TrueMutations
	}

	if err := applyMutations(tbl, r, muts); err != nil {
		return nil, err
	}
	return res, nil
}
|
||||
|
||||
// applyMutations applies a sequence of mutations to a row.
// It assumes r.mu is locked.
// Supported mutations: SetCell, DeleteFromColumn (with optional time
// range), and DeleteFromRow; anything else is an error.
func applyMutations(tbl *table, r *row, muts []*btdpb.Mutation) error {
	for _, mut := range muts {
		switch mut := mut.Mutation.(type) {
		default:
			return fmt.Errorf("can't handle mutation type %T", mut)
		case *btdpb.Mutation_SetCell_:
			set := mut.SetCell
			// The target family must exist.
			tbl.mu.RLock()
			_, famOK := tbl.families[set.FamilyName]
			tbl.mu.RUnlock()
			if !famOK {
				return fmt.Errorf("unknown family %q", set.FamilyName)
			}
			ts := set.TimestampMicros
			if ts == -1 { // bigtable.ServerTime
				ts = time.Now().UnixNano() / 1e3
				ts -= ts % 1000 // round to millisecond granularity
			}
			if !tbl.validTimestamp(ts) {
				return fmt.Errorf("invalid timestamp %d", ts)
			}
			col := fmt.Sprintf("%s:%s", set.FamilyName, set.ColumnQualifier)

			// Replace an existing cell with the same timestamp if there
			// is one; otherwise append, then restore newest-first order.
			cs := r.cells[col]
			newCell := cell{ts: ts, value: set.Value}
			replaced := false
			for i, cell := range cs {
				if cell.ts == newCell.ts {
					cs[i] = newCell
					replaced = true
					break
				}
			}
			if !replaced {
				cs = append(cs, newCell)
			}
			sort.Sort(byDescTS(cs))
			r.cells[col] = cs
		case *btdpb.Mutation_DeleteFromColumn_:
			del := mut.DeleteFromColumn
			col := fmt.Sprintf("%s:%s", del.FamilyName, del.ColumnQualifier)

			cs := r.cells[col]
			if del.TimeRange != nil {
				// Delete only cells within [start, end).
				tsr := del.TimeRange
				if !tbl.validTimestamp(tsr.StartTimestampMicros) {
					return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
				}
				if !tbl.validTimestamp(tsr.EndTimestampMicros) {
					return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
				}
				// Find half-open interval to remove.
				// Cells are in descending timestamp order,
				// so the predicates to sort.Search are inverted.
				si, ei := 0, len(cs)
				if tsr.StartTimestampMicros > 0 {
					ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })
				}
				if tsr.EndTimestampMicros > 0 {
					si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })
				}
				if si < ei {
					copy(cs[si:], cs[ei:])
					cs = cs[:len(cs)-(ei-si)]
				}
			} else {
				// No time range: delete every cell in the column.
				cs = nil
			}
			if len(cs) == 0 {
				delete(r.cells, col)
			} else {
				r.cells[col] = cs
			}
		case *btdpb.Mutation_DeleteFromRow_:
			// Drop every cell in the row.
			r.cells = make(map[string][]cell)
		}
	}
	return nil
}
|
||||
|
||||
// ReadModifyWriteRow applies append/increment rules to a row and returns
// a Row containing only the cells that were modified.
func (s *server) ReadModifyWriteRow(ctx context.Context, req *btspb.ReadModifyWriteRowRequest) (*btdpb.Row, error) {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no such table %q", req.TableName)
	}

	updates := make(map[string]cell) // copy of updated cells; keyed by full column name

	r := tbl.mutableRow(string(req.RowKey))
	r.mu.Lock()
	defer r.mu.Unlock()
	// Assume all mutations apply to the most recent version of the cell.
	// TODO(dsymonds): Verify this assumption and document it in the proto.
	for _, rule := range req.Rules {
		tbl.mu.RLock()
		_, famOK := tbl.families[rule.FamilyName]
		tbl.mu.RUnlock()
		if !famOK {
			return nil, fmt.Errorf("unknown family %q", rule.FamilyName)
		}

		key := fmt.Sprintf("%s:%s", rule.FamilyName, rule.ColumnQualifier)

		// Ensure the column has a cell to operate on; remember whether
		// we created it so increments don't read the empty value.
		newCell := false
		if len(r.cells[key]) == 0 {
			r.cells[key] = []cell{{
				// TODO(dsymonds): should this set a timestamp?
			}}
			newCell = true
		}
		cell := &r.cells[key][0]

		switch rule := rule.Rule.(type) {
		default:
			return nil, fmt.Errorf("unknown RMW rule oneof %T", rule)
		case *btdpb.ReadModifyWriteRule_AppendValue:
			cell.value = append(cell.value, rule.AppendValue...)
		case *btdpb.ReadModifyWriteRule_IncrementAmount:
			// The existing value is interpreted as a big-endian int64.
			var v int64
			if !newCell {
				if len(cell.value) != 8 {
					return nil, fmt.Errorf("increment on non-64-bit value")
				}
				v = int64(binary.BigEndian.Uint64(cell.value))
			}
			v += rule.IncrementAmount
			var val [8]byte
			binary.BigEndian.PutUint64(val[:], uint64(v))
			cell.value = val[:]
		}
		updates[key] = *cell
	}

	// Build the response from just the updated cells, grouping columns
	// under their families.
	res := &btdpb.Row{
		Key: req.RowKey,
	}
	for col, cell := range updates {
		i := strings.Index(col, ":")
		fam, qual := col[:i], col[i+1:]
		var f *btdpb.Family
		for _, ff := range res.Families {
			if ff.Name == fam {
				f = ff
				break
			}
		}
		if f == nil {
			f = &btdpb.Family{Name: fam}
			res.Families = append(res.Families, f)
		}
		f.Columns = append(f.Columns, &btdpb.Column{
			Qualifier: []byte(qual),
			Cells: []*btdpb.Cell{{
				Value: cell.value,
			}},
		})
	}
	return res, nil
}
|
||||
|
||||
// needGC is invoked whenever the server needs gcloop running.
|
||||
func (s *server) needGC() {
|
||||
s.mu.Lock()
|
||||
if s.gcc == nil {
|
||||
s.gcc = make(chan int)
|
||||
go s.gcloop(s.gcc)
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// gcloop periodically garbage-collects every table until done is closed
// (which happens when the server shuts down).
func (s *server) gcloop(done <-chan int) {
	const (
		minWait = 500  // ms
		maxWait = 1500 // ms
	)

	for {
		// Wait for a random time interval.
		d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond
		select {
		case <-time.After(d):
		case <-done:
			return // server has been closed
		}

		// Do a GC pass over all tables.
		// Snapshot the table list so s.mu isn't held during the GC work.
		var tables []*table
		s.mu.Lock()
		for _, tbl := range s.tables {
			tables = append(tables, tbl)
		}
		s.mu.Unlock()
		for _, tbl := range tables {
			tbl.gc()
		}
	}
}
|
||||
|
||||
// table is a single in-memory Bigtable table.
// rows and rowIndex hold the same *row values; rows provides sorted
// iteration for scans, rowIndex provides O(1) lookup by key.
type table struct {
	mu       sync.RWMutex
	families map[string]*columnFamily // keyed by plain family name
	rows     []*row                   // sorted by row key
	rowIndex map[string]*row          // indexed by row key
}
|
||||
|
||||
func newTable() *table {
|
||||
return &table{
|
||||
families: make(map[string]*columnFamily),
|
||||
rowIndex: make(map[string]*row),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *table) validTimestamp(ts int64) bool {
|
||||
// Assume millisecond granularity is required.
|
||||
return ts%1000 == 0
|
||||
}
|
||||
|
||||
// mutableRow returns the row with the given key, creating it (and
// inserting it into the sorted row slice) if it doesn't exist yet.
func (t *table) mutableRow(row string) *row {
	// Try fast path first.
	t.mu.RLock()
	r := t.rowIndex[row]
	t.mu.RUnlock()
	if r != nil {
		return r
	}

	// We probably need to create the row.
	// Re-check under the write lock: another goroutine may have created
	// it between our RUnlock and Lock.
	t.mu.Lock()
	r = t.rowIndex[row]
	if r == nil {
		r = newRow(row)
		t.rowIndex[row] = r
		t.rows = append(t.rows, r)
		sort.Sort(byRowKey(t.rows)) // yay, inefficient!
	}
	t.mu.Unlock()
	return r
}
|
||||
|
||||
// gc runs one garbage-collection pass over every row in the table,
// applying each family's GC rule.
func (t *table) gc() {
	// This method doesn't add or remove rows, so we only need a read lock for the table.
	t.mu.RLock()
	defer t.mu.RUnlock()

	// Gather GC rules we'll apply.
	rules := make(map[string]*bttdpb.GcRule) // keyed by "fam"
	for fam, cf := range t.families {
		if cf.gcRule != nil {
			rules[fam] = cf.gcRule
		}
	}
	if len(rules) == 0 {
		return
	}

	// Each row is trimmed under its own lock.
	for _, r := range t.rows {
		r.mu.Lock()
		r.gc(rules)
		r.mu.Unlock()
	}
}
|
||||
|
||||
// byRowKey implements sort.Interface, ordering rows lexically by key.
type byRowKey []*row

func (b byRowKey) Len() int { return len(b) }
func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }
|
||||
|
||||
// row is a single stored row, protected by its own mutex so mutations to
// different rows can proceed concurrently.
type row struct {
	key string

	mu    sync.Mutex
	cells map[string][]cell // keyed by full column name; cells are in descending timestamp order
}
|
||||
|
||||
func newRow(key string) *row {
|
||||
return &row{
|
||||
key: key,
|
||||
cells: make(map[string][]cell),
|
||||
}
|
||||
}
|
||||
|
||||
// copy returns a copy of the row.
|
||||
// Cell values are aliased.
|
||||
// r.mu should be held.
|
||||
func (r *row) copy() *row {
|
||||
nr := &row{
|
||||
key: r.key,
|
||||
cells: make(map[string][]cell, len(r.cells)),
|
||||
}
|
||||
for col, cs := range r.cells {
|
||||
// Copy the []cell slice, but not the []byte inside each cell.
|
||||
nr.cells[col] = append([]cell(nil), cs...)
|
||||
}
|
||||
return nr
|
||||
}
|
||||
|
||||
// gc applies the given GC rules to the row.
|
||||
// r.mu should be held.
|
||||
func (r *row) gc(rules map[string]*bttdpb.GcRule) {
|
||||
for col, cs := range r.cells {
|
||||
fam := col[:strings.Index(col, ":")]
|
||||
rule, ok := rules[fam]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
r.cells[col] = applyGC(cs, rule)
|
||||
}
|
||||
}
|
||||
|
||||
// gcTypeWarn ensures the "unsupported GC rule" warning is logged at most once.
var gcTypeWarn sync.Once
|
||||
|
||||
// applyGC applies the given GC rule to the cells and returns the
// survivors. Cells must be (and remain) in descending timestamp order.
// Unsupported rule types leave the cells untouched.
func applyGC(cells []cell, rule *bttdpb.GcRule) []cell {
	switch rule := rule.Rule.(type) {
	default:
		// TODO(dsymonds): Support GcRule_Intersection_
		gcTypeWarn.Do(func() {
			log.Printf("Unsupported GC rule type %T", rule)
		})
	case *bttdpb.GcRule_Union_:
		// Union: a cell is deleted if ANY sub-rule would delete it,
		// so applying the sub-rules in sequence is equivalent.
		for _, sub := range rule.Union.Rules {
			cells = applyGC(cells, sub)
		}
		return cells
	case *bttdpb.GcRule_MaxAge:
		// Timestamps are in microseconds.
		cutoff := time.Now().UnixNano() / 1e3
		cutoff -= rule.MaxAge.Seconds * 1e6
		cutoff -= int64(rule.MaxAge.Nanos) / 1e3
		// The slice of cells is in descending timestamp order.
		// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
		si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
		if si < len(cells) {
			log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
		}
		return cells[:si]
	case *bttdpb.GcRule_MaxNumVersions:
		// Keep only the newest n cells (a prefix, since cells are newest-first).
		n := int(rule.MaxNumVersions)
		if len(cells) > n {
			log.Printf("bttest: GC MaxNumVersions(%d) deleted %d cells.", n, len(cells)-n)
			cells = cells[:n]
		}
		return cells
	}
	return cells // unsupported rule type: no-op
}
|
||||
|
||||
// cell is one timestamped value of a column.
type cell struct {
	ts    int64 // timestamp in microseconds
	value []byte
}
|
||||
|
||||
// byDescTS implements sort.Interface, ordering cells newest-first
// (descending timestamp) — the storage invariant for r.cells.
type byDescTS []cell

func (b byDescTS) Len() int { return len(b) }
func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
|
||||
|
||||
// columnFamily holds a family's fully qualified name and its optional GC rule.
type columnFamily struct {
	name   string // ".../tables/<tbl>/columnFamilies/<fam>"
	gcRule *bttdpb.GcRule
}
|
||||
|
||||
func (c *columnFamily) proto() *bttdpb.ColumnFamily {
|
||||
return &bttdpb.ColumnFamily{
|
||||
Name: c.name,
|
||||
GcRule: c.gcRule,
|
||||
}
|
||||
}
|
||||
|
||||
func toColumnFamilies(families map[string]*columnFamily) map[string]*bttdpb.ColumnFamily {
|
||||
f := make(map[string]*bttdpb.ColumnFamily)
|
||||
for k, v := range families {
|
||||
f[k] = v.proto()
|
||||
}
|
||||
return f
|
||||
}
|
||||
580
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go
generated
vendored
Normal file
580
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbt.go
generated
vendored
Normal file
@@ -0,0 +1,580 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
// Command docs are in cbtdoc.go.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud/bigtable"
|
||||
"google.golang.org/cloud/bigtable/internal/cbtrc"
|
||||
)
|
||||
|
||||
var (
	oFlag = flag.String("o", "", "if set, redirect stdout to this file")

	config *cbtrc.Config
	// The three clients below are created lazily by the get*Client helpers.
	client             *bigtable.Client
	adminClient        *bigtable.AdminClient
	clusterAdminClient *bigtable.ClusterAdminClient
)
|
||||
|
||||
func getClient() *bigtable.Client {
|
||||
if client == nil {
|
||||
var err error
|
||||
client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.Client: %v", err)
|
||||
}
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
func getAdminClient() *bigtable.AdminClient {
|
||||
if adminClient == nil {
|
||||
var err error
|
||||
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.AdminClient: %v", err)
|
||||
}
|
||||
}
|
||||
return adminClient
|
||||
}
|
||||
|
||||
func getClusterAdminClient() *bigtable.ClusterAdminClient {
|
||||
if clusterAdminClient == nil {
|
||||
var err error
|
||||
clusterAdminClient, err = bigtable.NewClusterAdminClient(context.Background(), config.Project)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.ClusterAdminClient: %v", err)
|
||||
}
|
||||
}
|
||||
return clusterAdminClient
|
||||
}
|
||||
|
||||
// main loads configuration (~/.cbtrc plus flags), optionally redirects
// stdout via -o, and dispatches to the subcommand named by the first
// positional argument.
func main() {
	var err error
	config, err = cbtrc.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Usage = usage
	flag.Parse()
	if err := config.CheckFlags(); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		// Point the Google auth libraries at the configured key file.
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() == 0 {
		usage()
		os.Exit(1)
	}

	if *oFlag != "" {
		// Redirect stdout to the named file for the rest of the run;
		// the deferred Close surfaces any final write error.
		f, err := os.Create(*oFlag)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			if err := f.Close(); err != nil {
				log.Fatal(err)
			}
		}()
		os.Stdout = f
	}

	ctx := context.Background()
	for _, cmd := range commands {
		if cmd.Name == flag.Arg(0) {
			cmd.do(ctx, flag.Args()[1:]...)
			return
		}
	}
	log.Fatalf("Unknown command %q", flag.Arg(0))
}
|
||||
|
||||
// usage prints the flag defaults and the generated per-command summary
// (cmdSummary, built in init) to stderr.
func usage() {
	fmt.Fprintf(os.Stderr, "Usage: %s [flags] <command> ...\n", os.Args[0])
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr, "\n%s", cmdSummary)
}
|
||||
|
||||
var cmdSummary string // generated in init, below
|
||||
|
||||
// init builds cmdSummary: a tab-aligned table of command names and
// descriptions, followed by the config-file help text.
func init() {
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
	for _, cmd := range commands {
		fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
	}
	tw.Flush()
	buf.WriteString(configHelp)
	cmdSummary = buf.String()
}
|
||||
|
||||
var configHelp = `
|
||||
For convenience, values of the -project, -zone, -cluster and -creds flags
|
||||
may be specified in ` + cbtrc.Filename() + ` in this format:
|
||||
project = my-project-123
|
||||
zone = us-central1-b
|
||||
cluster = my-cluster
|
||||
creds = path-to-account-key.json
|
||||
All values are optional, and all will be overridden by flags.
|
||||
`
|
||||
|
||||
var commands = []struct {
|
||||
Name, Desc string
|
||||
do func(context.Context, ...string)
|
||||
Usage string
|
||||
}{
|
||||
{
|
||||
Name: "count",
|
||||
Desc: "Count rows in a table",
|
||||
do: doCount,
|
||||
Usage: "cbt count <table>",
|
||||
},
|
||||
{
|
||||
Name: "createfamily",
|
||||
Desc: "Create a column family",
|
||||
do: doCreateFamily,
|
||||
Usage: "cbt createfamily <table> <family>",
|
||||
},
|
||||
{
|
||||
Name: "createtable",
|
||||
Desc: "Create a table",
|
||||
do: doCreateTable,
|
||||
Usage: "cbt createtable <table>",
|
||||
},
|
||||
{
|
||||
Name: "deletefamily",
|
||||
Desc: "Delete a column family",
|
||||
do: doDeleteFamily,
|
||||
Usage: "cbt deletefamily <table> <family>",
|
||||
},
|
||||
{
|
||||
Name: "deleterow",
|
||||
Desc: "Delete a row",
|
||||
do: doDeleteRow,
|
||||
Usage: "cbt deleterow <table> <row>",
|
||||
},
|
||||
{
|
||||
Name: "deletetable",
|
||||
Desc: "Delete a table",
|
||||
do: doDeleteTable,
|
||||
Usage: "cbt deletetable <table>",
|
||||
},
|
||||
{
|
||||
Name: "doc",
|
||||
Desc: "Print documentation for cbt",
|
||||
do: doDoc,
|
||||
Usage: "cbt doc",
|
||||
},
|
||||
{
|
||||
Name: "help",
|
||||
Desc: "Print help text",
|
||||
do: doHelp,
|
||||
Usage: "cbt help [command]",
|
||||
},
|
||||
{
|
||||
Name: "listclusters",
|
||||
Desc: "List clusters in a project",
|
||||
do: doListClusters,
|
||||
Usage: "cbt listclusters",
|
||||
},
|
||||
{
|
||||
Name: "lookup",
|
||||
Desc: "Read from a single row",
|
||||
do: doLookup,
|
||||
Usage: "cbt lookup <table> <row>",
|
||||
},
|
||||
{
|
||||
Name: "ls",
|
||||
Desc: "List tables and column families",
|
||||
do: doLS,
|
||||
Usage: "cbt ls List tables\n" +
|
||||
"cbt ls <table> List column families in <table>",
|
||||
},
|
||||
{
|
||||
Name: "read",
|
||||
Desc: "Read rows",
|
||||
do: doRead,
|
||||
Usage: "cbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>]\n" +
|
||||
" start=<row> Start reading at this row\n" +
|
||||
" limit=<row> Stop reading before this row\n" +
|
||||
" prefix=<prefix> Read rows with this prefix\n",
|
||||
},
|
||||
{
|
||||
Name: "set",
|
||||
Desc: "Set value of a cell",
|
||||
do: doSet,
|
||||
Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
|
||||
" family:column=val[@ts] may be repeated to set multiple cells.\n" +
|
||||
"\n" +
|
||||
" ts is an optional integer timestamp.\n" +
|
||||
" If it cannot be parsed, the `@ts` part will be\n" +
|
||||
" interpreted as part of the value.",
|
||||
},
|
||||
/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API.
|
||||
{
|
||||
Name: "setclustersize",
|
||||
Desc: "Set size of a cluster",
|
||||
do: doSetClusterSize,
|
||||
Usage: "cbt setclustersize <num_nodes>",
|
||||
},
|
||||
*/
|
||||
}
|
||||
|
||||
func doCount(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatal("usage: cbt count <table>")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
|
||||
n := 0
|
||||
err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
|
||||
n++
|
||||
return true
|
||||
}, bigtable.RowFilter(bigtable.StripValueFilter()))
|
||||
if err != nil {
|
||||
log.Fatalf("Reading rows: %v", err)
|
||||
}
|
||||
fmt.Println(n)
|
||||
}
|
||||
|
||||
func doCreateFamily(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt createfamily <table> <family>")
|
||||
}
|
||||
err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doCreateTable(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatal("usage: cbt createtable <table>")
|
||||
}
|
||||
err := getAdminClient().CreateTable(ctx, args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Creating table: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteFamily(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt deletefamily <table> <family>")
|
||||
}
|
||||
err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Deleting column family: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteRow(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatal("usage: cbt deleterow <table> <row>")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
mut := bigtable.NewMutation()
|
||||
mut.DeleteRow()
|
||||
if err := tbl.Apply(ctx, args[1], mut); err != nil {
|
||||
log.Fatalf("Deleting row: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func doDeleteTable(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("Can't do `cbt deletetable %s`", args)
|
||||
}
|
||||
err := getAdminClient().DeleteTable(ctx, args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Deleting table: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// to break circular dependencies
|
||||
var (
|
||||
doDocFn func(ctx context.Context, args ...string)
|
||||
doHelpFn func(ctx context.Context, args ...string)
|
||||
)
|
||||
|
||||
func init() {
|
||||
doDocFn = doDocReal
|
||||
doHelpFn = doHelpReal
|
||||
}
|
||||
|
||||
func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) }
|
||||
func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) }
|
||||
|
||||
func doDocReal(ctx context.Context, args ...string) {
|
||||
data := map[string]interface{}{
|
||||
"Commands": commands,
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := docTemplate.Execute(&buf, data); err != nil {
|
||||
log.Fatalf("Bad doc template: %v", err)
|
||||
}
|
||||
out, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatalf("Bad doc output: %v", err)
|
||||
}
|
||||
os.Stdout.Write(out)
|
||||
}
|
||||
|
||||
var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
|
||||
"indent": func(s, ind string) string {
|
||||
ss := strings.Split(s, "\n")
|
||||
for i, p := range ss {
|
||||
ss[i] = ind + p
|
||||
}
|
||||
return strings.Join(ss, "\n")
|
||||
},
|
||||
}).
|
||||
Parse(`
|
||||
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
|
||||
// Run "go generate" to regenerate.
|
||||
//go:generate go run cbt.go -o cbtdoc.go doc
|
||||
|
||||
/*
|
||||
Cbt is a tool for doing basic interactions with Cloud Bigtable.
|
||||
|
||||
Usage:
|
||||
|
||||
cbt [options] command [arguments]
|
||||
|
||||
The commands are:
|
||||
{{range .Commands}}
|
||||
{{printf "%-25s %s" .Name .Desc}}{{end}}
|
||||
|
||||
Use "cbt help <command>" for more information about a command.
|
||||
|
||||
{{range .Commands}}
|
||||
{{.Desc}}
|
||||
|
||||
Usage:
|
||||
{{indent .Usage "\t"}}
|
||||
|
||||
|
||||
|
||||
{{end}}
|
||||
*/
|
||||
package main
|
||||
`))
|
||||
|
||||
func doHelpReal(ctx context.Context, args ...string) {
|
||||
if len(args) == 0 {
|
||||
fmt.Print(cmdSummary)
|
||||
return
|
||||
}
|
||||
for _, cmd := range commands {
|
||||
if cmd.Name == args[0] {
|
||||
fmt.Println(cmd.Usage)
|
||||
return
|
||||
}
|
||||
}
|
||||
log.Fatalf("Don't know command %q", args[0])
|
||||
}
|
||||
|
||||
func doListClusters(ctx context.Context, args ...string) {
|
||||
if len(args) != 0 {
|
||||
log.Fatalf("usage: cbt listclusters")
|
||||
}
|
||||
cis, err := getClusterAdminClient().Clusters(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting list of clusters: %v", err)
|
||||
}
|
||||
tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
|
||||
fmt.Fprintf(tw, "Cluster Name\tZone\tInfo\n")
|
||||
fmt.Fprintf(tw, "------------\t----\t----\n")
|
||||
for _, ci := range cis {
|
||||
fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.DisplayName, ci.ServeNodes)
|
||||
}
|
||||
tw.Flush()
|
||||
}
|
||||
|
||||
func doLookup(ctx context.Context, args ...string) {
|
||||
if len(args) != 2 {
|
||||
log.Fatalf("usage: cbt lookup <table> <row>")
|
||||
}
|
||||
table, row := args[0], args[1]
|
||||
tbl := getClient().Open(table)
|
||||
r, err := tbl.ReadRow(ctx, row)
|
||||
if err != nil {
|
||||
log.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
printRow(r)
|
||||
}
|
||||
|
||||
func printRow(r bigtable.Row) {
|
||||
fmt.Println(strings.Repeat("-", 40))
|
||||
fmt.Println(r.Key())
|
||||
|
||||
var fams []string
|
||||
for fam := range r {
|
||||
fams = append(fams, fam)
|
||||
}
|
||||
sort.Strings(fams)
|
||||
for _, fam := range fams {
|
||||
ris := r[fam]
|
||||
sort.Sort(byColumn(ris))
|
||||
for _, ri := range ris {
|
||||
ts := time.Unix(0, int64(ri.Timestamp)*1e3)
|
||||
fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
|
||||
fmt.Printf(" %q\n", ri.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type byColumn []bigtable.ReadItem
|
||||
|
||||
func (b byColumn) Len() int { return len(b) }
|
||||
func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
|
||||
|
||||
func doLS(ctx context.Context, args ...string) {
|
||||
switch len(args) {
|
||||
default:
|
||||
log.Fatalf("Can't do `cbt ls %s`", args)
|
||||
case 0:
|
||||
tables, err := getAdminClient().Tables(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting list of tables: %v", err)
|
||||
}
|
||||
sort.Strings(tables)
|
||||
for _, table := range tables {
|
||||
fmt.Println(table)
|
||||
}
|
||||
case 1:
|
||||
table := args[0]
|
||||
ti, err := getAdminClient().TableInfo(ctx, table)
|
||||
if err != nil {
|
||||
log.Fatalf("Getting table info: %v", err)
|
||||
}
|
||||
sort.Strings(ti.Families)
|
||||
for _, fam := range ti.Families {
|
||||
fmt.Println(fam)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doRead(ctx context.Context, args ...string) {
|
||||
if len(args) < 1 {
|
||||
log.Fatalf("usage: cbt read <table> [args ...]")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
|
||||
parsed := make(map[string]string)
|
||||
for _, arg := range args[1:] {
|
||||
i := strings.Index(arg, "=")
|
||||
if i < 0 {
|
||||
log.Fatalf("Bad arg %q", arg)
|
||||
}
|
||||
key, val := arg[:i], arg[i+1:]
|
||||
switch key {
|
||||
default:
|
||||
log.Fatalf("Unknown arg key %q", key)
|
||||
case "start", "limit", "prefix":
|
||||
parsed[key] = val
|
||||
}
|
||||
}
|
||||
if (parsed["start"] != "" || parsed["limit"] != "") && parsed["prefix"] != "" {
|
||||
log.Fatal(`"start"/"limit" may not be mixed with "prefix"`)
|
||||
}
|
||||
|
||||
var rr bigtable.RowRange
|
||||
if start, limit := parsed["start"], parsed["limit"]; limit != "" {
|
||||
rr = bigtable.NewRange(start, limit)
|
||||
} else if start != "" {
|
||||
rr = bigtable.InfiniteRange(start)
|
||||
}
|
||||
if prefix := parsed["prefix"]; prefix != "" {
|
||||
rr = bigtable.PrefixRange(prefix)
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Support filters.
|
||||
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
|
||||
printRow(r)
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Reading rows: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
|
||||
|
||||
func doSet(ctx context.Context, args ...string) {
|
||||
if len(args) < 3 {
|
||||
log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
|
||||
}
|
||||
tbl := getClient().Open(args[0])
|
||||
row := args[1]
|
||||
mut := bigtable.NewMutation()
|
||||
for _, arg := range args[2:] {
|
||||
m := setArg.FindStringSubmatch(arg)
|
||||
if m == nil {
|
||||
log.Fatalf("Bad set arg %q", arg)
|
||||
}
|
||||
val := m[3]
|
||||
ts := bigtable.Now()
|
||||
if i := strings.LastIndex(val, "@"); i >= 0 {
|
||||
// Try parsing a timestamp.
|
||||
n, err := strconv.ParseInt(val[i+1:], 0, 64)
|
||||
if err == nil {
|
||||
val = val[:i]
|
||||
ts = bigtable.Timestamp(n)
|
||||
}
|
||||
}
|
||||
mut.Set(m[1], m[2], ts, []byte(val))
|
||||
}
|
||||
if err := tbl.Apply(ctx, row, mut); err != nil {
|
||||
log.Fatalf("Applying mutation: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO(dsymonds): Re-enable when there's a ClusterAdmin API.
|
||||
func doSetClusterSize(ctx context.Context, args ...string) {
|
||||
if len(args) != 1 {
|
||||
log.Fatalf("usage: cbt setclustersize <num_nodes>")
|
||||
}
|
||||
n, err := strconv.ParseInt(args[0], 0, 32)
|
||||
if err != nil {
|
||||
log.Fatalf("Bad num_nodes value %q: %v", args[0], err)
|
||||
}
|
||||
if err := getAdminClient().SetClusterSize(ctx, int(n)); err != nil {
|
||||
log.Fatalf("Setting cluster size: %v", err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
146
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go
generated
vendored
Normal file
146
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/cbt/cbtdoc.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
|
||||
// Run "go generate" to regenerate.
|
||||
//go:generate go run cbt.go -o cbtdoc.go doc
|
||||
|
||||
/*
|
||||
Cbt is a tool for doing basic interactions with Cloud Bigtable.
|
||||
|
||||
Usage:
|
||||
|
||||
cbt [options] command [arguments]
|
||||
|
||||
The commands are:
|
||||
|
||||
count Count rows in a table
|
||||
createfamily Create a column family
|
||||
createtable Create a table
|
||||
deletefamily Delete a column family
|
||||
deleterow Delete a row
|
||||
deletetable Delete a table
|
||||
doc Print documentation for cbt
|
||||
help Print help text
|
||||
listclusters List clusters in a project
|
||||
lookup Read from a single row
|
||||
ls List tables and column families
|
||||
read Read rows
|
||||
set Set value of a cell
|
||||
|
||||
Use "cbt help <command>" for more information about a command.
|
||||
|
||||
|
||||
Count rows in a table
|
||||
|
||||
Usage:
|
||||
cbt count <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Create a column family
|
||||
|
||||
Usage:
|
||||
cbt createfamily <table> <family>
|
||||
|
||||
|
||||
|
||||
|
||||
Create a table
|
||||
|
||||
Usage:
|
||||
cbt createtable <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a column family
|
||||
|
||||
Usage:
|
||||
cbt deletefamily <table> <family>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a row
|
||||
|
||||
Usage:
|
||||
cbt deleterow <table> <row>
|
||||
|
||||
|
||||
|
||||
|
||||
Delete a table
|
||||
|
||||
Usage:
|
||||
cbt deletetable <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Print documentation for cbt
|
||||
|
||||
Usage:
|
||||
cbt doc
|
||||
|
||||
|
||||
|
||||
|
||||
Print help text
|
||||
|
||||
Usage:
|
||||
cbt help [command]
|
||||
|
||||
|
||||
|
||||
|
||||
List clusters in a project
|
||||
|
||||
Usage:
|
||||
cbt listclusters
|
||||
|
||||
|
||||
|
||||
|
||||
Read from a single row
|
||||
|
||||
Usage:
|
||||
cbt lookup <table> <row>
|
||||
|
||||
|
||||
|
||||
|
||||
List tables and column families
|
||||
|
||||
Usage:
|
||||
cbt ls List tables
|
||||
cbt ls <table> List column families in <table>
|
||||
|
||||
|
||||
|
||||
|
||||
Read rows
|
||||
|
||||
Usage:
|
||||
cbt read <table> [start=<row>] [limit=<row>] [prefix=<prefix>]
|
||||
start=<row> Start reading at this row
|
||||
limit=<row> Stop reading before this row
|
||||
prefix=<prefix> Read rows with this prefix
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Set value of a cell
|
||||
|
||||
Usage:
|
||||
cbt set <table> <row> family:column=val[@ts] ...
|
||||
family:column=val[@ts] may be repeated to set multiple cells.
|
||||
|
||||
ts is an optional integer timestamp.
|
||||
If it cannot be parsed, the `@ts` part will be
|
||||
interpreted as part of the value.
|
||||
|
||||
|
||||
|
||||
|
||||
*/
|
||||
package main
|
||||
159
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go
generated
vendored
Normal file
159
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/loadtest.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Loadtest does some load testing through the Go client library for Cloud Bigtable.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/cloud/bigtable"
|
||||
"google.golang.org/cloud/bigtable/internal/cbtrc"
|
||||
)
|
||||
|
||||
var (
|
||||
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
|
||||
scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
|
||||
|
||||
config *cbtrc.Config
|
||||
client *bigtable.Client
|
||||
adminClient *bigtable.AdminClient
|
||||
)
|
||||
|
||||
func main() {
|
||||
var err error
|
||||
config, err = cbtrc.Load()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
config.RegisterFlags()
|
||||
|
||||
flag.Parse()
|
||||
if err := config.CheckFlags(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if config.Creds != "" {
|
||||
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
|
||||
}
|
||||
if flag.NArg() != 0 {
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
log.Printf("Dialing connections...")
|
||||
client, err = bigtable.NewClient(context.Background(), config.Project, config.Zone, config.Cluster)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.Client: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Zone, config.Cluster)
|
||||
if err != nil {
|
||||
log.Fatalf("Making bigtable.AdminClient: %v", err)
|
||||
}
|
||||
defer adminClient.Close()
|
||||
|
||||
// Create a scratch table.
|
||||
log.Printf("Setting up scratch table...")
|
||||
if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
|
||||
log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
|
||||
}
|
||||
if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
|
||||
log.Fatalf("Making scratch table column family: %v", err)
|
||||
}
|
||||
// Upon a successful run, delete the table. Don't bother checking for errors.
|
||||
defer adminClient.DeleteTable(context.Background(), *scratchTable)
|
||||
|
||||
log.Printf("Starting load test... (run for %v)", *runFor)
|
||||
tbl := client.Open(*scratchTable)
|
||||
sem := make(chan int, 100) // limit the number of requests happening at once
|
||||
var reads, writes stats
|
||||
stopTime := time.Now().Add(*runFor)
|
||||
var wg sync.WaitGroup
|
||||
for time.Now().Before(stopTime) {
|
||||
sem <- 1
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
|
||||
ok := true
|
||||
opStart := time.Now()
|
||||
var stats *stats
|
||||
defer func() {
|
||||
stats.Record(ok, time.Since(opStart))
|
||||
}()
|
||||
|
||||
row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows
|
||||
|
||||
switch rand.Intn(10) {
|
||||
default:
|
||||
// read
|
||||
stats = &reads
|
||||
_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
|
||||
if err != nil {
|
||||
log.Printf("Error doing read: %v", err)
|
||||
ok = false
|
||||
}
|
||||
case 0, 1, 2, 3, 4:
|
||||
// write
|
||||
stats = &writes
|
||||
mut := bigtable.NewMutation()
|
||||
mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
|
||||
if err := tbl.Apply(context.Background(), row, mut); err != nil {
|
||||
log.Printf("Error doing mutation: %v", err)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, newAggregate(reads.ds))
|
||||
log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, newAggregate(writes.ds))
|
||||
}
|
||||
|
||||
// allStats counts completed operations across all stats values; it is only
// accessed with sync/atomic.
var allStats int64

// stats accumulates attempt/success counts and per-operation latencies for
// one kind of operation (reads or writes). Safe for concurrent use.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

// Record notes one attempt: whether it succeeded and how long it took.
// Every 1000th operation recorded globally emits a progress log line.
func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
	s.mu.Unlock()

	// Progress counter is global, outside the per-stats lock.
	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
		log.Printf("Progress: done %d ops", n)
	}
}
|
||||
97
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go
generated
vendored
Normal file
97
Godeps/_workspace/src/google.golang.org/cloud/bigtable/cmd/loadtest/stats.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
)
|
||||
|
||||
type byDuration []time.Duration
|
||||
|
||||
func (data byDuration) Len() int { return len(data) }
|
||||
func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] }
|
||||
func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }
|
||||
|
||||
// quantile returns a value representing the kth of q quantiles.
|
||||
// May alter the order of data.
|
||||
func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
|
||||
if len(data) < 1 {
|
||||
return 0, false
|
||||
}
|
||||
if k > q {
|
||||
return 0, false
|
||||
}
|
||||
if k < 0 || q < 1 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
sort.Sort(byDuration(data))
|
||||
|
||||
if k == 0 {
|
||||
return data[0], true
|
||||
}
|
||||
if k == q {
|
||||
return data[len(data)-1], true
|
||||
}
|
||||
|
||||
bucketSize := float64(len(data)-1) / float64(q)
|
||||
i := float64(k) * bucketSize
|
||||
|
||||
lower := int(math.Trunc(i))
|
||||
var upper int
|
||||
if i > float64(lower) && lower+1 < len(data) {
|
||||
// If the quantile lies between two elements
|
||||
upper = lower + 1
|
||||
} else {
|
||||
upper = lower
|
||||
}
|
||||
weightUpper := i - float64(lower)
|
||||
weightLower := 1 - weightUpper
|
||||
return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
|
||||
}
|
||||
|
||||
type aggregate struct {
|
||||
min, median, max time.Duration
|
||||
p95, p99 time.Duration // percentiles
|
||||
}
|
||||
|
||||
// newAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data.
|
||||
func newAggregate(latencies []time.Duration) *aggregate {
|
||||
var agg aggregate
|
||||
|
||||
if len(latencies) == 0 {
|
||||
return nil
|
||||
}
|
||||
var ok bool
|
||||
if agg.min, ok = quantile(latencies, 0, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.median, ok = quantile(latencies, 1, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.max, ok = quantile(latencies, 2, 2); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.p95, ok = quantile(latencies, 95, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
if agg.p99, ok = quantile(latencies, 99, 100); !ok {
|
||||
return nil
|
||||
}
|
||||
return &agg
|
||||
}
|
||||
|
||||
func (agg *aggregate) String() string {
|
||||
if agg == nil {
|
||||
return "no data"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
|
||||
fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
|
||||
agg.min, agg.median, agg.max, agg.p95, agg.p99)
|
||||
tw.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
108
Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go
generated
vendored
Normal file
108
Godeps/_workspace/src/google.golang.org/cloud/bigtable/doc.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package bigtable is an API to Google Cloud Bigtable.
|
||||
|
||||
See https://cloud.google.com/bigtable/docs/ for general product documentation.
|
||||
|
||||
Setup and Credentials
|
||||
|
||||
Use NewClient or NewAdminClient to create a client that can be used to access
|
||||
the data or admin APIs respectively. Both require credentials that have permission
|
||||
to access the Cloud Bigtable API.
|
||||
|
||||
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
|
||||
(https://developers.google.com/accounts/docs/application-default-credentials)
|
||||
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.
|
||||
|
||||
To use alternate credentials, pass them to NewClient or NewAdminClient using cloud.WithTokenSource.
|
||||
For instance, you can use service account credentials by visiting
|
||||
https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
|
||||
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
|
||||
jsonKey, err := ioutil.ReadFile(pathToKeyFile)
|
||||
...
|
||||
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
|
||||
...
|
||||
client, err := bigtable.NewClient(ctx, project, zone, cluster, cloud.WithTokenSource(config.TokenSource(ctx)))
|
||||
...
|
||||
Here, `google` means the golang.org/x/oauth2/google package
|
||||
and `cloud` means the google.golang.org/cloud package.
|
||||
|
||||
Reading
|
||||
|
||||
The principal way to read from a Bigtable is to use the ReadRows method on *Table.
|
||||
A RowRange specifies a contiguous portion of a table. A Filter may be provided through
|
||||
RowFilter to limit or transform the data that is returned.
|
||||
tbl := client.Open("mytable")
|
||||
...
|
||||
// Read all the rows starting with "com.google.",
|
||||
// but only fetch the columns in the "links" family.
|
||||
rr := bigtable.PrefixRange("com.google.")
|
||||
err := tbl.ReadRows(ctx, rr, func(r Row) bool {
|
||||
// do something with r
|
||||
return true // keep going
|
||||
}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
|
||||
...
|
||||
|
||||
To read a single row, use the ReadRow helper method.
|
||||
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
|
||||
...
|
||||
|
||||
Writing
|
||||
|
||||
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
|
||||
The former expresses idempotent operations.
|
||||
The latter expresses non-idempotent operations and returns the new values of updated cells.
|
||||
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
|
||||
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
|
||||
methods on a Table.
|
||||
|
||||
For instance, to set a couple of cells in a table,
|
||||
tbl := client.Open("mytable")
|
||||
mut := bigtable.NewMutation()
|
||||
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
|
||||
mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
|
||||
err := tbl.Apply(ctx, "com.google.cloud", mut)
|
||||
...
|
||||
|
||||
To increment an encoded value in one cell,
|
||||
tbl := client.Open("mytable")
|
||||
rmw := bigtable.NewReadModifyWrite()
|
||||
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
|
||||
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
|
||||
...
|
||||
*/
|
||||
package bigtable // import "google.golang.org/cloud/bigtable"
|
||||
|
||||
// Scope constants for authentication credentials.
|
||||
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
|
||||
const (
|
||||
// Scope is the OAuth scope for Cloud Bigtable data operations.
|
||||
Scope = "https://www.googleapis.com/auth/bigtable.data"
|
||||
// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
|
||||
ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
|
||||
|
||||
// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
|
||||
AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
|
||||
|
||||
// ClusterAdminScope is the OAuth scope for Cloud Bigtable cluster admin operations.
|
||||
ClusterAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
|
||||
)
|
||||
|
||||
// clientUserAgent identifies the version of this package.
|
||||
// It should be bumped upon significant changes only.
|
||||
const clientUserAgent = "cbt-go/20150727"
|
||||
156
Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go
generated
vendored
Normal file
156
Godeps/_workspace/src/google.golang.org/cloud/bigtable/filter.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
btdpb "google.golang.org/cloud/bigtable/internal/data_proto"
|
||||
)
|
||||
|
||||
// A Filter represents a row filter.
|
||||
type Filter interface {
|
||||
String() string
|
||||
proto() *btdpb.RowFilter
|
||||
}
|
||||
|
||||
// ChainFilters returns a filter that applies a sequence of filters.
|
||||
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }
|
||||
|
||||
type chainFilter struct {
|
||||
sub []Filter
|
||||
}
|
||||
|
||||
func (cf chainFilter) String() string {
|
||||
var ss []string
|
||||
for _, sf := range cf.sub {
|
||||
ss = append(ss, sf.String())
|
||||
}
|
||||
return "(" + strings.Join(ss, " | ") + ")"
|
||||
}
|
||||
|
||||
func (cf chainFilter) proto() *btdpb.RowFilter {
|
||||
chain := &btdpb.RowFilter_Chain{}
|
||||
for _, sf := range cf.sub {
|
||||
chain.Filters = append(chain.Filters, sf.proto())
|
||||
}
|
||||
return &btdpb.RowFilter{
|
||||
Filter: &btdpb.RowFilter_Chain_{chain},
|
||||
}
|
||||
}
|
||||
|
||||
// InterleaveFilters returns a filter that applies a set of filters in parallel
|
||||
// and interleaves the results.
|
||||
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }
|
||||
|
||||
type interleaveFilter struct {
|
||||
sub []Filter
|
||||
}
|
||||
|
||||
func (ilf interleaveFilter) String() string {
|
||||
var ss []string
|
||||
for _, sf := range ilf.sub {
|
||||
ss = append(ss, sf.String())
|
||||
}
|
||||
return "(" + strings.Join(ss, " + ") + ")"
|
||||
}
|
||||
|
||||
func (ilf interleaveFilter) proto() *btdpb.RowFilter {
|
||||
inter := &btdpb.RowFilter_Interleave{}
|
||||
for _, sf := range ilf.sub {
|
||||
inter.Filters = append(inter.Filters, sf.proto())
|
||||
}
|
||||
return &btdpb.RowFilter{
|
||||
Filter: &btdpb.RowFilter_Interleave_{inter},
|
||||
}
|
||||
}
|
||||
|
||||
// RowKeyFilter returns a filter that matches cells from rows whose
|
||||
// key matches the provided RE2 pattern.
|
||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
|
||||
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }
|
||||
|
||||
type rowKeyFilter string
|
||||
|
||||
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }
|
||||
|
||||
func (rkf rowKeyFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
|
||||
}
|
||||
|
||||
// FamilyFilter returns a filter that matches cells whose family name
|
||||
// matches the provided RE2 pattern.
|
||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
|
||||
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }
|
||||
|
||||
type familyFilter string
|
||||
|
||||
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }
|
||||
|
||||
func (ff familyFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
|
||||
}
|
||||
|
||||
// ColumnFilter returns a filter that matches cells whose column name
|
||||
// matches the provided RE2 pattern.
|
||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
|
||||
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }
|
||||
|
||||
type columnFilter string
|
||||
|
||||
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }
|
||||
|
||||
func (cf columnFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
|
||||
}
|
||||
|
||||
// ValueFilter returns a filter that matches cells whose value
|
||||
// matches the provided RE2 pattern.
|
||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
|
||||
func ValueFilter(pattern string) Filter { return valueFilter(pattern) }
|
||||
|
||||
type valueFilter string
|
||||
|
||||
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }
|
||||
|
||||
func (vf valueFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
|
||||
}
|
||||
|
||||
// LatestNFilter returns a filter that matches the most recent N cells in each column.
|
||||
func LatestNFilter(n int) Filter { return latestNFilter(n) }
|
||||
|
||||
type latestNFilter int32
|
||||
|
||||
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
|
||||
|
||||
func (lnf latestNFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
|
||||
}
|
||||
|
||||
// StripValueFilter returns a filter that replaces each value with the empty string.
|
||||
func StripValueFilter() Filter { return stripValueFilter{} }
|
||||
|
||||
type stripValueFilter struct{}
|
||||
|
||||
func (stripValueFilter) String() string { return "strip_value()" }
|
||||
func (stripValueFilter) proto() *btdpb.RowFilter {
|
||||
return &btdpb.RowFilter{Filter: &btdpb.RowFilter_StripValueTransformer{true}}
|
||||
}
|
||||
|
||||
// TODO(dsymonds): More filters: cond, col/ts/value range, sampling
|
||||
131
Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go
generated
vendored
Normal file
131
Godeps/_workspace/src/google.golang.org/cloud/bigtable/gc.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bigtable
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
durpb "google.golang.org/cloud/bigtable/internal/duration_proto"
|
||||
bttdpb "google.golang.org/cloud/bigtable/internal/table_data_proto"
|
||||
)
|
||||
|
||||
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
|
||||
type GCPolicy interface {
|
||||
String() string
|
||||
proto() *bttdpb.GcRule
|
||||
}
|
||||
|
||||
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
|
||||
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }
|
||||
|
||||
type intersectionPolicy struct {
|
||||
sub []GCPolicy
|
||||
}
|
||||
|
||||
func (ip intersectionPolicy) String() string {
|
||||
var ss []string
|
||||
for _, sp := range ip.sub {
|
||||
ss = append(ss, sp.String())
|
||||
}
|
||||
return "(" + strings.Join(ss, " && ") + ")"
|
||||
}
|
||||
|
||||
func (ip intersectionPolicy) proto() *bttdpb.GcRule {
|
||||
inter := &bttdpb.GcRule_Intersection{}
|
||||
for _, sp := range ip.sub {
|
||||
inter.Rules = append(inter.Rules, sp.proto())
|
||||
}
|
||||
return &bttdpb.GcRule{
|
||||
Rule: &bttdpb.GcRule_Intersection_{inter},
|
||||
}
|
||||
}
|
||||
|
||||
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
|
||||
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }
|
||||
|
||||
type unionPolicy struct {
|
||||
sub []GCPolicy
|
||||
}
|
||||
|
||||
func (up unionPolicy) String() string {
|
||||
var ss []string
|
||||
for _, sp := range up.sub {
|
||||
ss = append(ss, sp.String())
|
||||
}
|
||||
return "(" + strings.Join(ss, " || ") + ")"
|
||||
}
|
||||
|
||||
func (up unionPolicy) proto() *bttdpb.GcRule {
|
||||
union := &bttdpb.GcRule_Union{}
|
||||
for _, sp := range up.sub {
|
||||
union.Rules = append(union.Rules, sp.proto())
|
||||
}
|
||||
return &bttdpb.GcRule{
|
||||
Rule: &bttdpb.GcRule_Union_{union},
|
||||
}
|
||||
}
|
||||
|
||||
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
|
||||
// except for the most recent n.
|
||||
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }
|
||||
|
||||
type maxVersionsPolicy int
|
||||
|
||||
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
|
||||
|
||||
func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
|
||||
return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
|
||||
}
|
||||
|
||||
// MaxAgePolicy returns a GC policy that applies to all cells
|
||||
// older than the given age.
|
||||
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
|
||||
|
||||
type maxAgePolicy time.Duration
|
||||
|
||||
var units = []struct {
|
||||
d time.Duration
|
||||
suffix string
|
||||
}{
|
||||
{24 * time.Hour, "d"},
|
||||
{time.Hour, "h"},
|
||||
{time.Minute, "m"},
|
||||
}
|
||||
|
||||
func (ma maxAgePolicy) String() string {
|
||||
d := time.Duration(ma)
|
||||
for _, u := range units {
|
||||
if d%u.d == 0 {
|
||||
return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("age() > %d", d/time.Microsecond)
|
||||
}
|
||||
|
||||
func (ma maxAgePolicy) proto() *bttdpb.GcRule {
|
||||
// This doesn't handle overflows, etc.
|
||||
// Fix this if people care about GC policies over 290 years.
|
||||
ns := time.Duration(ma).Nanoseconds()
|
||||
return &bttdpb.GcRule{
|
||||
Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
|
||||
Seconds: ns / 1e9,
|
||||
Nanos: int32(ns % 1e9),
|
||||
}},
|
||||
}
|
||||
}
|
||||
105
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go
generated
vendored
Normal file
105
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cbtrc/cbtrc.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
/*
|
||||
Copyright 2015 Google Inc. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package cbtrc encapsulates common code for reading .cbtrc files.
|
||||
package cbtrc
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config represents a configuration.
type Config struct {
	Project, Zone, Cluster string // required
	Creds                  string // optional
}

// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
func (c *Config) RegisterFlags() {
	flag.StringVar(&c.Project, "project", c.Project, "project ID")
	flag.StringVar(&c.Zone, "zone", c.Zone, "CBT zone")
	flag.StringVar(&c.Cluster, "cluster", c.Cluster, "CBT cluster")
	flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
}

// CheckFlags checks that the required config values are set.
// It returns an error naming every missing flag, or nil if all are present.
func (c *Config) CheckFlags() error {
	required := []struct {
		value string
		name  string
	}{
		{c.Project, "-project"},
		{c.Zone, "-zone"},
		{c.Cluster, "-cluster"},
	}
	var missing []string
	for _, r := range required {
		if r.value == "" {
			missing = append(missing, r.name)
		}
	}
	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
}
|
||||
|
||||
// Filename returns the filename consulted for standard configuration.
|
||||
func Filename() string {
|
||||
// TODO(dsymonds): Might need tweaking for Windows.
|
||||
return filepath.Join(os.Getenv("HOME"), ".cbtrc")
|
||||
}
|
||||
|
||||
// Load loads a .cbtrc file.
|
||||
// If the file is not present, an empty config is returned.
|
||||
func Load() (*Config, error) {
|
||||
filename := Filename()
|
||||
data, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
// silent fail if the file isn't there
|
||||
if os.IsNotExist(err) {
|
||||
return &Config{}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Reading %s: %v", filename, err)
|
||||
}
|
||||
c := new(Config)
|
||||
s := bufio.NewScanner(bytes.NewReader(data))
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
i := strings.Index(line, "=")
|
||||
if i < 0 {
|
||||
return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
|
||||
}
|
||||
key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
|
||||
switch key {
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
|
||||
case "project":
|
||||
c.Project = val
|
||||
case "zone":
|
||||
c.Zone = val
|
||||
case "cluster":
|
||||
c.Cluster = val
|
||||
case "creds":
|
||||
c.Creds = val
|
||||
}
|
||||
}
|
||||
return c, s.Err()
|
||||
}
|
||||
119
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go
generated
vendored
Normal file
119
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.pb.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package google_bigtable_admin_cluster_v1 is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Zone
|
||||
Cluster
|
||||
*/
|
||||
package google_bigtable_admin_cluster_v1
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type StorageType int32
|
||||
|
||||
const (
|
||||
// The storage type used is unspecified.
|
||||
StorageType_STORAGE_UNSPECIFIED StorageType = 0
|
||||
// Data will be stored in SSD, providing low and consistent latencies.
|
||||
StorageType_STORAGE_SSD StorageType = 1
|
||||
)
|
||||
|
||||
var StorageType_name = map[int32]string{
|
||||
0: "STORAGE_UNSPECIFIED",
|
||||
1: "STORAGE_SSD",
|
||||
}
|
||||
var StorageType_value = map[string]int32{
|
||||
"STORAGE_UNSPECIFIED": 0,
|
||||
"STORAGE_SSD": 1,
|
||||
}
|
||||
|
||||
func (x StorageType) String() string {
|
||||
return proto.EnumName(StorageType_name, int32(x))
|
||||
}
|
||||
|
||||
// Possible states of a zone.
|
||||
type Zone_Status int32
|
||||
|
||||
const (
|
||||
// The state of the zone is unknown or unspecified.
|
||||
Zone_UNKNOWN Zone_Status = 0
|
||||
// The zone is in a good state.
|
||||
Zone_OK Zone_Status = 1
|
||||
// The zone is down for planned maintenance.
|
||||
Zone_PLANNED_MAINTENANCE Zone_Status = 2
|
||||
// The zone is down for emergency or unplanned maintenance.
|
||||
Zone_EMERGENCY_MAINENANCE Zone_Status = 3
|
||||
)
|
||||
|
||||
var Zone_Status_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "OK",
|
||||
2: "PLANNED_MAINTENANCE",
|
||||
3: "EMERGENCY_MAINENANCE",
|
||||
}
|
||||
var Zone_Status_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"OK": 1,
|
||||
"PLANNED_MAINTENANCE": 2,
|
||||
"EMERGENCY_MAINENANCE": 3,
|
||||
}
|
||||
|
||||
func (x Zone_Status) String() string {
|
||||
return proto.EnumName(Zone_Status_name, int32(x))
|
||||
}
|
||||
|
||||
// A physical location in which a particular project can allocate Cloud BigTable
|
||||
// resources.
|
||||
type Zone struct {
|
||||
// A permanent unique identifier for the zone.
|
||||
// Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
// The name of this zone as it appears in UIs.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name" json:"display_name,omitempty"`
|
||||
// The current state of this zone.
|
||||
Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Zone) Reset() { *m = Zone{} }
|
||||
func (m *Zone) String() string { return proto.CompactTextString(m) }
|
||||
func (*Zone) ProtoMessage() {}
|
||||
|
||||
// An isolated set of Cloud BigTable resources on which tables can be hosted.
|
||||
type Cluster struct {
|
||||
// A permanent unique identifier for the cluster. For technical reasons, the
|
||||
// zone in which the cluster resides is included here.
|
||||
// Values are of the form
|
||||
// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
// The descriptive name for this cluster as it appears in UIs.
|
||||
// Must be unique per zone.
|
||||
DisplayName string `protobuf:"bytes,4,opt,name=display_name" json:"display_name,omitempty"`
|
||||
// The number of serve nodes allocated to this cluster.
|
||||
ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes" json:"serve_nodes,omitempty"`
|
||||
// What storage type to use for tables in this cluster. Only configurable at
|
||||
// cluster creation time. If unspecified, STORAGE_SSD will be used.
|
||||
DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Cluster) Reset() { *m = Cluster{} }
|
||||
func (m *Cluster) String() string { return proto.CompactTextString(m) }
|
||||
func (*Cluster) ProtoMessage() {}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("google.bigtable.admin.cluster.v1.StorageType", StorageType_name, StorageType_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value)
|
||||
}
|
||||
89
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto
generated
vendored
Normal file
89
Godeps/_workspace/src/google.golang.org/cloud/bigtable/internal/cluster_data_proto/bigtable_cluster_data.proto
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright (c) 2015, Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.bigtable.admin.cluster.v1;
|
||||
|
||||
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "BigtableClusterDataProto";
|
||||
option java_package = "com.google.bigtable.admin.cluster.v1";
|
||||
|
||||
|
||||
// A physical location in which a particular project can allocate Cloud BigTable
// resources.
message Zone {
  // Possible states of a zone.
  enum Status {
    // The state of the zone is unknown or unspecified.
    UNKNOWN = 0;

    // The zone is in a good state.
    OK = 1;

    // The zone is down for planned maintenance.
    PLANNED_MAINTENANCE = 2;

    // The zone is down for emergency or unplanned maintenance.
    EMERGENCY_MAINENANCE = 3;
  }

  // A permanent unique identifier for the zone.
  // Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
  string name = 1;

  // The name of this zone as it appears in UIs.
  string display_name = 2;

  // The current state of this zone.
  Status status = 3;
}
|
||||
|
||||
// An isolated set of Cloud BigTable resources on which tables can be hosted.
message Cluster {
  // A permanent unique identifier for the cluster. For technical reasons, the
  // zone in which the cluster resides is included here.
  // Values are of the form
  //   projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
  string name = 1;

  // NOTE(review): the original carries stray comments describing a deleted
  // backup-destruction timestamp (set only via DeleteCluster) and a
  // "current_operation" field (set only via Create/Update/UndeleteCluster)
  // with no corresponding declarations — presumably stripped when vendoring.
  // Do not reuse their field numbers without checking the upstream proto.

  // The descriptive name for this cluster as it appears in UIs.
  // Must be unique per zone.
  string display_name = 4;

  // The number of serve nodes allocated to this cluster.
  int32 serve_nodes = 5;

  // What storage type to use for tables in this cluster. Only configurable at
  // cluster creation time. If unspecified, STORAGE_SSD will be used.
  StorageType default_storage_type = 8;
}
|
||||
|
||||
// Selects the storage medium for a cluster's tables.
enum StorageType {
  // The storage type used is unspecified.
  STORAGE_UNSPECIFIED = 0;

  // Data will be stored in SSD, providing low and consistent latencies.
  STORAGE_SSD = 1;
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user