mirror of https://github.com/rclone/rclone synced 2024-11-26 04:07:22 +01:00

vendor: add qingstor-sdk-go for QingStor

This commit is contained in:
wuyu 2017-06-26 05:45:22 +08:00 committed by Nick Craig-Wood
parent f682002b84
commit 466dd22b44
136 changed files with 15952 additions and 1 deletion
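For context, a rough sketch of how the newly vendored SDK is typically initialized by a backend (a minimal sketch of the qingstor-sdk-go v2 API; the access keys, bucket name and zone below are illustrative placeholders, not values from this commit):

```go
package main

import (
	"log"

	"github.com/yunify/qingstor-sdk-go/config"
	qs "github.com/yunify/qingstor-sdk-go/service"
)

func main() {
	// Build a client configuration from static credentials (placeholders).
	cfg, err := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
	if err != nil {
		log.Fatal(err)
	}

	// Initialise the top-level QingStor service, then bind a bucket in a zone.
	svc, err := qs.Init(cfg)
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := svc.Bucket("example-bucket", "pek3a")
	if err != nil {
		log.Fatal(err)
	}
	_ = bucket // a backend would go on to call the bucket's object operations
}
```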

Gopkg.lock generated (20 lines changed)

@@ -13,6 +13,12 @@
revision = "a5913b3f7deecba45e98ff33cefbac4fd204ddd7"
version = "v0.10.0"
[[projects]]
name = "github.com/Sirupsen/logrus"
packages = ["."]
revision = "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/Unknwon/goconfig"
@@ -127,6 +133,12 @@
packages = ["."]
revision = "4ed959e0540971545eddb8c75514973d670cf739"
[[projects]]
name = "github.com/pengsrc/go-shared"
packages = ["check","convert","json","yaml"]
revision = "454950d6a0782c34427d4f29b46c6bf447256f20"
version = "v0.0.8"
[[projects]]
branch = "master"
name = "github.com/pkg/errors"
@@ -193,6 +205,12 @@
packages = ["."]
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
[[projects]]
name = "github.com/yunify/qingstor-sdk-go"
packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"]
revision = "26f2cc6f249f4c2a08ed89f1d7d566a463c1dfc2"
version = "v2.2.5"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
@@ -250,6 +268,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "b04805ec8dc2bf704d0ad0c73e2ce9f13f3770b946a70bda8fd602886ff124a4"
inputs-digest = "904bc0ea1f770b0473b24560dc0d24c5c647971e959e58538799c5cad1eaa97e"
solver-name = "gps-cdcl"
solver-version = 1


@@ -132,3 +132,7 @@
[[constraint]]
branch = "master"
name = "github.com/ncw/dropbox-sdk-go-unofficial"
[[constraint]]
branch = "master"
name = "github.com/yunify/qingstor-sdk-go"

vendor/github.com/Sirupsen/logrus/.gitignore generated vendored Normal file (1 line changed)

@@ -0,0 +1 @@
logrus

vendor/github.com/Sirupsen/logrus/.travis.yml generated vendored Normal file (13 lines changed)

@@ -0,0 +1,13 @@
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- tip
env:
- GOMAXPROCS=4 GORACE=halt_on_error=1
install:
- go get github.com/stretchr/testify/assert
script:
- go test -race -v .
- cd hooks/null && go test -race -v .

vendor/github.com/Sirupsen/logrus/CHANGELOG.md generated vendored Normal file (100 lines changed)

@@ -0,0 +1,100 @@
# 1.0.0
* Officially changed name to lower-case
* bug: colors on Windows 10 (#541)
* bug: fix race in accessing level (#512)
# 0.11.5
* feature: add writer and writerlevel to entry (#372)
# 0.11.4
* bug: fix undefined variable on solaris (#493)
# 0.11.3
* formatter: configure quoting of empty values (#484)
* formatter: configure quoting character (default is `"`) (#484)
* bug: fix not importing io correctly in non-linux environments (#481)
# 0.11.2
* bug: fix windows terminal detection (#476)
# 0.11.1
* bug: fix tty detection with custom out (#471)
# 0.11.0
* performance: Use bufferpool to allocate (#370)
* terminal: terminal detection for app-engine (#343)
* feature: exit handler (#375)
# 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7
* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements
# 0.8.6
* hooks/raven: allow passing an initialized client
# 0.8.5
* logrus/core: revert #208
# 0.8.4
* formatter/text: fix data race (#218)
# 0.8.3
* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely
# 0.8.2
* logrus: fix more Fatal family functions
# 0.8.1
* logrus: fix not exiting on `Fatalf` and `Fatalln`
# 0.8.0
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors
# 0.7.3
* formatter/\*: allow configuration of timestamp layout
# 0.7.2
* formatter/text: Add configuration option for time format (#158)

vendor/github.com/Sirupsen/logrus/LICENSE generated vendored Normal file (21 lines changed)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Simon Eskildsen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/Sirupsen/logrus/README.md generated vendored Normal file (501 lines changed)

@@ -0,0 +1,501 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**
**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
realize the consequences of renaming to lower-case. Due to the Go package
environment, this caused issues. Regretfully, there's no turning back now.
Everything using `logrus` will need to use the lower-case:
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
I am terribly sorry for this inconvenience. Logrus strives hard for backwards
compatibility, and the author failed to realize the cascading consequences of
such a name-change. To fix Glide, see [these
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```
#### Case-sensitivity
The organization's name was changed to lower-case--and this will not be changed
back. If you are getting import conflicts due to case sensitivity, please use
the lower-case import: `github.com/sirupsen/logrus`.
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
```go
package main
import (
log "github.com/sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
}).Info("A walrus appears")
}
```
Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
```go
package main
import (
"os"
log "github.com/sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
}
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(log.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(log.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
// A common pattern is to re-use fields between logging statements by re-using
// the logrus.Entry returned from WithFields()
contextLogger := log.WithFields(log.Fields{
"common": "this is a common field",
"other": "I also should be logged always",
})
contextLogger.Info("I'll be logged with common and other field")
contextLogger.Info("Me too")
}
```
For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:
```go
package main
import (
"os"
"github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
log.Out = os.Stdout
// You could set this to any `io.Writer` such as a file
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
}
```
#### Fields
Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
```go
log.WithFields(log.Fields{
"event": event,
"topic": topic,
"key": key,
}).Fatal("Failed to send event")
```
We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.
In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
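As a hedged illustration of that hint (the `event` and `topic` variables are assumed to already exist in scope), a formatted message can usually be rewritten as constant text plus fields:

```go
// Before: the context is baked into the message string.
log.Errorf("Failed to send event %s to topic %s", event, topic)

// After: the message stays constant and the context becomes queryable fields.
log.WithFields(log.Fields{
	"event": event,
	"topic": topic,
}).Error("Failed to send event")
```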
#### Default Fields
Often it's helpful to have fields _always_ attached to log statements in an
application or parts of one. For example, you may want to always log the
`request_id` and `user_ip` in the context of a request. Instead of writing
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
every line, you can create a `logrus.Entry` to pass around instead:
```go
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
requestLogger.Info("something happened on that request") // will log request_id and user_ip
requestLogger.Warn("something not great happened")
```
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:
```go
import (
log "github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
log.Error("Unable to connect to local syslog daemon")
} else {
log.AddHook(hook)
}
}
```
Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
| Hook | Description |
| ----- | ----------- |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
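The built-in and third-party hooks above all implement the same two-method `Hook` interface, so writing your own is straightforward. A minimal sketch (the `CountingHook` type is made up purely for illustration and does no locking of its own):

```go
import (
	log "github.com/sirupsen/logrus"
)

// CountingHook counts how many entries were logged at each level.
type CountingHook struct {
	Counts map[log.Level]int
}

// Levels reports which levels the hook fires on; here, all of them.
func (h *CountingHook) Levels() []log.Level {
	return log.AllLevels
}

// Fire is called once per matching entry; it may inspect or modify the entry.
func (h *CountingHook) Fire(entry *log.Entry) error {
	h.Counts[entry.Level]++
	return nil
}

func init() {
	log.AddHook(&CountingHook{Counts: make(map[log.Level]int)})
}
```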
#### Level logging
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```
You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:
```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
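For instance, a small sketch that switches on debug logging when a (hypothetical) `LOG_DEBUG` environment variable is set:

```go
import (
	"os"

	log "github.com/sirupsen/logrus"
)

func init() {
	if os.Getenv("LOG_DEBUG") != "" {
		log.SetLevel(log.DebugLevel)
	} else {
		log.SetLevel(log.InfoLevel)
	}
}
```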
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:
1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
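So a single call like the sketch below carries those three fields plus whatever you added (the timestamp in the comment is only indicative):

```go
log.WithField("animal", "walrus").Info("A walrus appears")
// With the default TextFormatter and no TTY this renders roughly as:
// time="2017-06-26T05:45:22+08:00" level=info msg="A walrus appears" animal=walrus
```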
#### Environments
Logrus has no notion of environment.
If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:
```go
import (
log "github.com/sirupsen/logrus"
)
func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
}
}
```
This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.
#### Formatters
The built-in logging formatters are:
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`. For Windows, see
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
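As a small hedged example of setting one of those documented options (the layout string below is just an illustration, any Go time layout works):

```go
log.SetFormatter(&log.JSONFormatter{
	TimestampFormat: "2006-01-02 15:04:05",
})
```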
Third party logging formatters:
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):
```go
type MyJSONFormatter struct {
}
log.SetFormatter(new(MyJSONFormatter))
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
```
#### Logger as an `io.Writer`
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
defer w.Close()
srv := http.Server{
// create a stdlib log.Logger that writes to
// logrus.Logger.
ErrorLog: log.New(w, "", 0),
}
```
Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.
This means that we can override the standard library logger easily:
```go
logger := logrus.New()
logger.Formatter = &logrus.JSONFormatter{}
// Use logrus for standard log output
// Note that `log` here references stdlib's log
// Not logrus imported under the name `log`.
log.SetOutput(logger.Writer())
```
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
#### Tools
| Tool | Description |
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: it lets you set each logger's level, hooks and formatter from a config file, so loggers are generated with different configurations for different environments.|
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration by using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
#### Testing
Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
import (
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/null"
"github.com/stretchr/testify/assert"
"testing"
)
func TestSomething(t *testing.T) {
logger, hook := null.NewNullLogger()
logger.Error("Helloerror")
assert.Equal(t, 1, len(hook.Entries))
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
hook.Reset()
assert.Nil(t, hook.LastEntry())
}
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```
#### Thread safety
By default the Logger is protected by a mutex for concurrent writes; the mutex is also held while hooks are called and logs are written.
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
Situations where locking is not needed include:
* You have no hooks registered, or calling the hooks is already thread-safe.
* Writing to logger.Out is already thread-safe, for example:
  1) logger.Out is protected by locks.
  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
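A minimal sketch of when `SetNoLock` can be used (the `lockedBuffer` writer is invented here purely to show a `logger.Out` that already synchronizes itself):

```go
import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// lockedBuffer is a toy io.Writer that does its own locking.
type lockedBuffer struct {
	mu  sync.Mutex
	buf bytes.Buffer
}

func (w *lockedBuffer) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.buf.Write(p)
}

func newSafeLogger() *logrus.Logger {
	logger := logrus.New()
	logger.Out = &lockedBuffer{} // output already serialized by the writer itself
	logger.SetNoLock()           // so logrus's own mutex can be skipped
	return logger
}
```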

vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored Normal file (64 lines changed)

@@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be
// closing database connections, or sending a alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}

vendor/github.com/Sirupsen/logrus/alt_exit_test.go generated vendored Normal file (74 lines changed)

@@ -0,0 +1,74 @@
package logrus
import (
"io/ioutil"
"os/exec"
"testing"
"time"
)
func TestRegister(t *testing.T) {
current := len(handlers)
RegisterExitHandler(func() {})
if len(handlers) != current+1 {
t.Fatalf("can't add handler")
}
}
func TestHandler(t *testing.T) {
gofile := "/tmp/testprog.go"
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
t.Fatalf("can't create go file")
}
outfile := "/tmp/testprog.out"
arg := time.Now().UTC().String()
err := exec.Command("go", "run", gofile, outfile, arg).Run()
if err == nil {
t.Fatalf("completed normally, should have failed")
}
data, err := ioutil.ReadFile(outfile)
if err != nil {
t.Fatalf("can't read output file %s", outfile)
}
if string(data) != arg {
t.Fatalf("bad data")
}
}
var testprog = []byte(`
// Test program for atexit, gets output file and data as arguments and writes
// data to output file in atexit handler.
package main
import (
"github.com/sirupsen/logrus"
"flag"
"fmt"
"io/ioutil"
)
var outfile = ""
var data = ""
func handler() {
ioutil.WriteFile(outfile, []byte(data), 0666)
}
func badHandler() {
n := 0
fmt.Println(1/n)
}
func main() {
flag.Parse()
outfile = flag.Arg(0)
data = flag.Arg(1)
logrus.RegisterExitHandler(handler)
logrus.RegisterExitHandler(badHandler)
logrus.Fatal("Bye bye")
}
`)

vendor/github.com/Sirupsen/logrus/doc.go generated vendored Normal file (26 lines changed)

@@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus

vendor/github.com/Sirupsen/logrus/entry.go generated vendored Normal file (275 lines changed)

@@ -0,0 +1,275 @@
package logrus
import (
"bytes"
"fmt"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), an Buffer may be set to entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}

vendor/github.com/Sirupsen/logrus/entry_test.go generated vendored Normal file (77 lines changed)

@@ -0,0 +1,77 @@
package logrus
import (
"bytes"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEntryWithError(t *testing.T) {
assert := assert.New(t)
defer func() {
ErrorKey = "error"
}()
err := fmt.Errorf("kaboom at layer %d", 4711)
assert.Equal(err, WithError(err).Data["error"])
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
assert.Equal(err, entry.WithError(err).Data["error"])
ErrorKey = "err"
assert.Equal(err, entry.WithError(err).Data["err"])
}
func TestEntryPanicln(t *testing.T) {
errBoom := fmt.Errorf("boom time")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicln("kaboom")
}
func TestEntryPanicf(t *testing.T) {
errBoom := fmt.Errorf("boom again")
defer func() {
p := recover()
assert.NotNil(t, p)
switch pVal := p.(type) {
case *Entry:
assert.Equal(t, "kaboom true", pVal.Message)
assert.Equal(t, errBoom, pVal.Data["err"])
default:
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
}
}()
logger := New()
logger.Out = &bytes.Buffer{}
entry := NewEntry(logger)
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
}


@@ -0,0 +1,59 @@
package main
import (
"github.com/sirupsen/logrus"
// "os"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.JSONFormatter)
log.Formatter = new(logrus.TextFormatter) // default
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
// if err == nil {
// log.Out = file
// } else {
// log.Info("Failed to log to file, using default stderr")
// }
log.Level = logrus.DebugLevel
}
func main() {
defer func() {
err := recover()
if err != nil {
log.WithFields(logrus.Fields{
"omg": true,
"err": err,
"number": 100,
}).Fatal("The ice breaks!")
}
}()
log.WithFields(logrus.Fields{
"animal": "walrus",
"number": 8,
}).Debug("Started observing beach")
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"temperature": -4,
}).Debug("Temperature changes")
log.WithFields(logrus.Fields{
"animal": "orca",
"size": 9009,
}).Panic("It's over 9000!")
}


@@ -0,0 +1,30 @@
package main
import (
"github.com/sirupsen/logrus"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
)
var log = logrus.New()
func init() {
log.Formatter = new(logrus.TextFormatter) // default
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
}
func main() {
log.WithFields(logrus.Fields{
"animal": "walrus",
"size": 10,
}).Info("A group of walrus emerges from the ocean")
log.WithFields(logrus.Fields{
"omg": true,
"number": 122,
}).Warn("The group's number increased tremendously!")
log.WithFields(logrus.Fields{
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
}

vendor/github.com/Sirupsen/logrus/exported.go generated vendored Normal file (193 lines changed)

@@ -0,0 +1,193 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.setLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.level()
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}

vendor/github.com/Sirupsen/logrus/formatter.go generated vendored Normal file (45 lines changed)

@@ -0,0 +1,45 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user provided level. Instead with this code
// it'll logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}


@@ -0,0 +1,101 @@
package logrus
import (
"fmt"
"testing"
"time"
)
// smallFields is a small size data set for benchmarking
var smallFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
// largeFields is a large size data set for benchmarking
var largeFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
"five": "six",
"seven": "eight",
"nine": "ten",
"eleven": "twelve",
"thirteen": "fourteen",
"fifteen": "sixteen",
"seventeen": "eighteen",
"nineteen": "twenty",
"a": "b",
"c": "d",
"e": "f",
"g": "h",
"i": "j",
"k": "l",
"m": "n",
"o": "p",
"q": "r",
"s": "t",
"u": "v",
"w": "x",
"y": "z",
"this": "will",
"make": "thirty",
"entries": "yeah",
}
var errorFields = Fields{
"foo": fmt.Errorf("bar"),
"baz": fmt.Errorf("qux"),
}
func BenchmarkErrorTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
}
func BenchmarkSmallTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkLargeTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
}
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
}
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
}
func BenchmarkSmallJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, smallFields)
}
func BenchmarkLargeJSONFormatter(b *testing.B) {
doBenchmark(b, &JSONFormatter{}, largeFields)
}
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
logger := New()
entry := &Entry{
Time: time.Time{},
Level: InfoLevel,
Message: "message",
Data: fields,
Logger: logger,
}
var d []byte
var err error
for i := 0; i < b.N; i++ {
d, err = formatter.Format(entry)
if err != nil {
b.Fatal(err)
}
b.SetBytes(int64(len(d)))
}
}

vendor/github.com/Sirupsen/logrus/hook_test.go generated vendored Normal file (122 lines changed)

@@ -0,0 +1,122 @@
package logrus
import (
"testing"
"github.com/stretchr/testify/assert"
)
type TestHook struct {
Fired bool
}
func (hook *TestHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *TestHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookFires(t *testing.T) {
hook := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
assert.Equal(t, hook.Fired, false)
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}
type ModifyHook struct {
}
func (hook *ModifyHook) Fire(entry *Entry) error {
entry.Data["wow"] = "whale"
return nil
}
func (hook *ModifyHook) Levels() []Level {
return []Level{
DebugLevel,
InfoLevel,
WarnLevel,
ErrorLevel,
FatalLevel,
PanicLevel,
}
}
func TestHookCanModifyEntry(t *testing.T) {
hook := new(ModifyHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
})
}
func TestCanFireMultipleHooks(t *testing.T) {
hook1 := new(ModifyHook)
hook2 := new(TestHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook1)
log.Hooks.Add(hook2)
log.WithField("wow", "elephant").Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["wow"], "whale")
assert.Equal(t, hook2.Fired, true)
})
}
type ErrorHook struct {
Fired bool
}
func (hook *ErrorHook) Fire(entry *Entry) error {
hook.Fired = true
return nil
}
func (hook *ErrorHook) Levels() []Level {
return []Level{
ErrorLevel,
}
}
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, false)
})
}
func TestErrorHookShouldFireOnError(t *testing.T) {
hook := new(ErrorHook)
LogAndAssertJSON(t, func(log *Logger) {
log.Hooks.Add(hook)
log.Error("test")
}, func(fields Fields) {
assert.Equal(t, hook.Fired, true)
})
}

vendor/github.com/Sirupsen/logrus/hooks.go generated vendored Normal file (34 lines changed)

@@ -0,0 +1,34 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}


@@ -0,0 +1,39 @@
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
## Usage
```go
import (
"log/syslog"
"github.com/sirupsen/logrus"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```
If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following.
```go
import (
"log/syslog"
"github.com/sirupsen/logrus"
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
)
func main() {
log := logrus.New()
hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
if err == nil {
log.Hooks.Add(hook)
}
}
```


@@ -0,0 +1,54 @@
// +build !windows,!nacl,!plan9
package logrus_syslog
import (
"fmt"
"github.com/sirupsen/logrus"
"log/syslog"
"os"
)
// SyslogHook to send logs via syslog.
type SyslogHook struct {
Writer *syslog.Writer
SyslogNetwork string
SyslogRaddr string
}
// Creates a hook to be added to an instance of logger. This is called with
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
// `if err == nil { log.Hooks.Add(hook) }`
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
w, err := syslog.Dial(network, raddr, priority, tag)
return &SyslogHook{w, network, raddr}, err
}
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
line, err := entry.String()
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
return err
}
switch entry.Level {
case logrus.PanicLevel:
return hook.Writer.Crit(line)
case logrus.FatalLevel:
return hook.Writer.Crit(line)
case logrus.ErrorLevel:
return hook.Writer.Err(line)
case logrus.WarnLevel:
return hook.Writer.Warning(line)
case logrus.InfoLevel:
return hook.Writer.Info(line)
case logrus.DebugLevel:
return hook.Writer.Debug(line)
default:
return nil
}
}
func (hook *SyslogHook) Levels() []logrus.Level {
return logrus.AllLevels
}


@@ -0,0 +1,26 @@
package logrus_syslog
import (
"github.com/sirupsen/logrus"
"log/syslog"
"testing"
)
func TestLocalhostAddAndPrint(t *testing.T) {
log := logrus.New()
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
t.Errorf("Unable to connect to local syslog.")
}
log.Hooks.Add(hook)
for _, level := range hook.Levels() {
if len(log.Hooks[level]) != 1 {
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
}
}
log.Info("Congratulations!")
}

vendor/github.com/Sirupsen/logrus/hooks/test/test.go generated vendored Normal file (95 lines changed)

@@ -0,0 +1,95 @@
// The Test package is used for testing logrus. It is here for backwards
// compatibility from when logrus' organization was upper-case. Please use
// lower-case logrus and the `null` package instead of this one.
package test
import (
"io/ioutil"
"sync"
"github.com/sirupsen/logrus"
)
// Hook is a hook designed for dealing with logs in test scenarios.
type Hook struct {
// Entries is an array of all entries that have been received by this hook.
// For safe access, use the AllEntries() method, rather than reading this
// value directly.
Entries []*logrus.Entry
mu sync.RWMutex
}
// NewGlobal installs a test hook for the global logger.
func NewGlobal() *Hook {
hook := new(Hook)
logrus.AddHook(hook)
return hook
}
// NewLocal installs a test hook for a given local logger.
func NewLocal(logger *logrus.Logger) *Hook {
hook := new(Hook)
logger.Hooks.Add(hook)
return hook
}
// NewNullLogger creates a discarding logger and installs the test hook.
func NewNullLogger() (*logrus.Logger, *Hook) {
logger := logrus.New()
logger.Out = ioutil.Discard
return logger, NewLocal(logger)
}
func (t *Hook) Fire(e *logrus.Entry) error {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = append(t.Entries, e)
return nil
}
func (t *Hook) Levels() []logrus.Level {
return logrus.AllLevels
}
// LastEntry returns the last entry that was logged or nil.
func (t *Hook) LastEntry() *logrus.Entry {
t.mu.RLock()
defer t.mu.RUnlock()
i := len(t.Entries) - 1
if i < 0 {
return nil
}
// Make a copy, for safety
e := *t.Entries[i]
return &e
}
// AllEntries returns all entries that were logged.
func (t *Hook) AllEntries() []*logrus.Entry {
t.mu.RLock()
defer t.mu.RUnlock()
// Make a copy so the returned value won't race with future log requests
entries := make([]*logrus.Entry, len(t.Entries))
for i, entry := range t.Entries {
// Make a copy, for safety
e := *entry
entries[i] = &e
}
return entries
}
// Reset removes all Entries from this test hook.
func (t *Hook) Reset() {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = make([]*logrus.Entry, 0)
}

@ -0,0 +1,39 @@
package test
import (
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
func TestAllHooks(t *testing.T) {
assert := assert.New(t)
logger, hook := NewNullLogger()
assert.Nil(hook.LastEntry())
assert.Equal(0, len(hook.Entries))
logger.Error("Hello error")
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
assert.Equal(1, len(hook.Entries))
logger.Warn("Hello warning")
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
assert.Equal("Hello warning", hook.LastEntry().Message)
assert.Equal(2, len(hook.Entries))
hook.Reset()
assert.Nil(hook.LastEntry())
assert.Equal(0, len(hook.Entries))
hook = NewGlobal()
logrus.Error("Hello error")
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
assert.Equal(1, len(hook.Entries))
}

74
vendor/github.com/Sirupsen/logrus/json_formatter.go generated vendored Normal file
@ -0,0 +1,74 @@
package logrus
import (
"encoding/json"
"fmt"
)
type fieldKey string
type FieldMap map[fieldKey]string
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for various fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// },
// }
FieldMap FieldMap
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}
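As a minimal sketch of wiring this formatter into a logger, using only the `JSONFormatter` options defined above (the renamed field keys and the example message are illustrative):
```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	// Emit JSON and rename the default time/level/msg keys via FieldMap.
	log.Formatter = &logrus.JSONFormatter{
		TimestampFormat: "2006-01-02 15:04:05",
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	log.WithField("animal", "walrus").Info("A walrus appears")
}
```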

@ -0,0 +1,199 @@
package logrus
import (
"encoding/json"
"errors"
"strings"
"testing"
)
func TestErrorNotLost(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["error"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["omg"] != "wild walrus" {
t.Fatal("Error field not set")
}
}
func TestFieldClashWithTime(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("time", "right now!"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.time"] != "right now!" {
t.Fatal("fields.time not set to original time field")
}
if entry["time"] != "0001-01-01T00:00:00Z" {
t.Fatal("time field not set to current time, was: ", entry["time"])
}
}
func TestFieldClashWithMsg(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("msg", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.msg"] != "something" {
t.Fatal("fields.msg not set to original msg field")
}
}
func TestFieldClashWithLevel(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
if entry["fields.level"] != "something" {
t.Fatal("fields.level not set to original level field")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
if b[len(b)-1] != '\n' {
t.Fatal("Expected JSON log entry to end with a newline")
}
}
func TestJSONMessageKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyMsg: "message",
},
}
b, err := formatter.Format(&Entry{Message: "oh hai"})
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
t.Fatal("Expected JSON to format message key")
}
}
func TestJSONLevelKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyLevel: "somelevel",
},
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "somelevel") {
t.Fatal("Expected JSON to format level key")
}
}
func TestJSONTimeKey(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyTime: "timeywimey",
},
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, "timeywimey") {
t.Fatal("Expected JSON to format time key")
}
}
func TestJSONDisableTimestamp(t *testing.T) {
formatter := &JSONFormatter{
DisableTimestamp: true,
}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if strings.Contains(s, FieldKeyTime) {
t.Error("Did not prevent timestamp", s)
}
}
func TestJSONEnableTimestamp(t *testing.T) {
formatter := &JSONFormatter{}
b, err := formatter.Format(WithField("level", "something"))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
s := string(b)
if !strings.Contains(s, FieldKeyTime) {
t.Error("Timestamp not present", s)
}
}

317
vendor/github.com/Sirupsen/logrus/logger.go generated vendored Normal file
@ -0,0 +1,317 @@
package logrus
import (
"io"
"os"
"sync"
"sync/atomic"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks LevelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in development and for verbose troubleshooting.
Level Level
// Used to sync writing to the log. Locking is enabled by Default
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.level() >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.level() >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.level() >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.level() >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.level() >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When a file is opened in append mode, it's safe to write to it
// concurrently (for messages under 4k on Linux). In these cases the user
// can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
func (logger *Logger) setLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
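A rough sketch of constructing this `Logger` directly, as the comment on `New` suggests; the field values and messages are only illustrative:
```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// Instantiate a logger instead of using the package-level default.
	log := &logrus.Logger{
		Out:       os.Stderr,
		Formatter: new(logrus.TextFormatter),
		Hooks:     make(logrus.LevelHooks),
		Level:     logrus.DebugLevel,
	}

	// With Level set to DebugLevel, entries of every severity are emitted.
	log.WithFields(logrus.Fields{
		"event": "startup",
		"port":  8080,
	}).Info("service listening")

	log.WithError(os.ErrNotExist).Warn("optional config file not found")
}
```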

61
vendor/github.com/Sirupsen/logrus/logger_bench_test.go generated vendored Normal file
@ -0,0 +1,61 @@
package logrus
import (
"os"
"testing"
)
// loggerFields is a small size data set for benchmarking
var loggerFields = Fields{
"foo": "bar",
"baz": "qux",
"one": "two",
"three": "four",
}
func BenchmarkDummyLogger(b *testing.B) {
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
if err != nil {
b.Fatalf("%v", err)
}
defer nullf.Close()
doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
}
func BenchmarkDummyLoggerNoLock(b *testing.B) {
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
if err != nil {
b.Fatalf("%v", err)
}
defer nullf.Close()
doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
}
func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
logger := Logger{
Out: out,
Level: InfoLevel,
Formatter: formatter,
}
entry := logger.WithFields(fields)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
entry.Info("aaa")
}
})
}
func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
logger := Logger{
Out: out,
Level: InfoLevel,
Formatter: formatter,
}
logger.SetNoLock()
entry := logger.WithFields(fields)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
entry.Info("aaa")
}
})
}

143
vendor/github.com/Sirupsen/logrus/logrus.go generated vendored Normal file
@ -0,0 +1,143 @@
package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface, this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
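As a sketch of how `ParseLevel` and `FieldLogger` might be used together — for example setting verbosity from a (hypothetical) command-line flag and passing either a `*Logger` or an `*Entry` to the same helper:
```go
package main

import (
	"flag"

	"github.com/sirupsen/logrus"
)

// report accepts anything that satisfies FieldLogger, so it works with
// both *logrus.Logger and *logrus.Entry.
func report(log logrus.FieldLogger, job string) {
	log.WithField("job", job).Info("done")
}

func main() {
	levelName := flag.String("log-level", "info", "panic|fatal|error|warn|info|debug")
	flag.Parse()

	log := logrus.New()
	if lvl, err := logrus.ParseLevel(*levelName); err == nil {
		log.Level = lvl
	} else {
		log.Warnf("unknown level %q, keeping default", *levelName)
	}

	report(log, "backup")
	report(log.WithField("component", "scheduler"), "cleanup")
}
```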

386
vendor/github.com/Sirupsen/logrus/logrus_test.go generated vendored Normal file
@ -0,0 +1,386 @@
package logrus
import (
"bytes"
"encoding/json"
"strconv"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
log(logger)
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assertions(fields)
}
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
var buffer bytes.Buffer
logger := New()
logger.Out = &buffer
logger.Formatter = &TextFormatter{
DisableColors: true,
}
log(logger)
fields := make(map[string]string)
for _, kv := range strings.Split(buffer.String(), " ") {
if !strings.Contains(kv, "=") {
continue
}
kvArr := strings.Split(kv, "=")
key := strings.TrimSpace(kvArr[0])
val := kvArr[1]
if kvArr[1][0] == '"' {
var err error
val, err = strconv.Unquote(val)
assert.NoError(t, err)
}
fields[key] = val
}
assertions(fields)
}
func TestPrint(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Print("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestInfo(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "info")
})
}
func TestWarn(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Warn("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["level"], "warning")
})
}
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test test")
})
}
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test 10")
})
}
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Infoln(10, 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "10 10")
})
}
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", 10)
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test10")
})
}
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.Info("test", "test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "testtest")
})
}
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
localLog := logger.WithFields(Fields{
"key1": "value1",
})
localLog.WithField("key2", "value2").Info("test")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
assert.Equal(t, "value2", fields["key2"])
assert.Equal(t, "value1", fields["key1"])
buffer = bytes.Buffer{}
fields = Fields{}
localLog.Info("test")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.Nil(t, err)
_, ok := fields["key2"]
assert.Equal(t, false, ok)
assert.Equal(t, "value1", fields["key1"])
}
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
})
}
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("msg", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["msg"], "test")
assert.Equal(t, fields["fields.msg"], "hello")
})
}
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("time", "hello").Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["fields.time"], "hello")
})
}
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("level", 1).Info("test")
}, func(fields Fields) {
assert.Equal(t, fields["level"], "info")
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
})
}
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
LogAndAssertText(t, func(log *Logger) {
ll := log.WithField("herp", "derp")
ll.Info("hello")
ll.Info("bye")
}, func(fields map[string]string) {
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
if _, ok := fields[fieldName]; ok {
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
}
}
})
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
var fields Fields
logger := New()
logger.Out = &buffer
logger.Formatter = new(JSONFormatter)
llog := logger.WithField("context", "eating raw fish")
llog.Info("looks delicious")
err := json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded first message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "looks delicious")
assert.Equal(t, fields["context"], "eating raw fish")
buffer.Reset()
llog.Warn("omg it is!")
err = json.Unmarshal(buffer.Bytes(), &fields)
assert.NoError(t, err, "should have decoded second message")
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
assert.Equal(t, fields["msg"], "omg it is!")
assert.Equal(t, fields["context"], "eating raw fish")
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
}
func TestConvertLevelToString(t *testing.T) {
assert.Equal(t, "debug", DebugLevel.String())
assert.Equal(t, "info", InfoLevel.String())
assert.Equal(t, "warning", WarnLevel.String())
assert.Equal(t, "error", ErrorLevel.String())
assert.Equal(t, "fatal", FatalLevel.String())
assert.Equal(t, "panic", PanicLevel.String())
}
func TestParseLevel(t *testing.T) {
l, err := ParseLevel("panic")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("PANIC")
assert.Nil(t, err)
assert.Equal(t, PanicLevel, l)
l, err = ParseLevel("fatal")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("FATAL")
assert.Nil(t, err)
assert.Equal(t, FatalLevel, l)
l, err = ParseLevel("error")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("ERROR")
assert.Nil(t, err)
assert.Equal(t, ErrorLevel, l)
l, err = ParseLevel("warn")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("WARN")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("warning")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("WARNING")
assert.Nil(t, err)
assert.Equal(t, WarnLevel, l)
l, err = ParseLevel("info")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("INFO")
assert.Nil(t, err)
assert.Equal(t, InfoLevel, l)
l, err = ParseLevel("debug")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("DEBUG")
assert.Nil(t, err)
assert.Equal(t, DebugLevel, l)
l, err = ParseLevel("invalid")
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
func TestGetSetLevelRace(t *testing.T) {
wg := sync.WaitGroup{}
for i := 0; i < 100; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
if i%2 == 0 {
SetLevel(InfoLevel)
} else {
GetLevel()
}
}(i)
}
wg.Wait()
}
func TestLoggingRace(t *testing.T) {
logger := New()
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func() {
logger.Info("info")
wg.Done()
}()
}
wg.Wait()
}
// Compile test
func TestLogrusInterface(t *testing.T) {
var buffer bytes.Buffer
fn := func(l FieldLogger) {
b := l.WithField("key", "value")
b.Debug("Test")
}
// test logger
logger := New()
logger.Out = &buffer
fn(logger)
// test Entry
e := logger.WithField("another", "value")
fn(e)
}
// Implements io.Writer using channels for synchronization, so we can wait on
// the Entry.Writer goroutine to write in a non-racey way. This does assume that
// there is a single call to Logger.Out for each message.
type channelWriter chan []byte
func (cw channelWriter) Write(p []byte) (int, error) {
cw <- p
return len(p), nil
}
func TestEntryWriter(t *testing.T) {
cw := channelWriter(make(chan []byte, 1))
log := New()
log.Out = cw
log.Formatter = new(JSONFormatter)
log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
bs := <-cw
var fields Fields
err := json.Unmarshal(bs, &fields)
assert.Nil(t, err)
assert.Equal(t, fields["foo"], "bar")
assert.Equal(t, fields["level"], "warning")
}

@ -0,0 +1,10 @@
// +build appengine
package logrus
import "io"
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
return true
}

10
vendor/github.com/Sirupsen/logrus/terminal_bsd.go generated vendored Normal file
@ -0,0 +1,10 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios

14
vendor/github.com/Sirupsen/logrus/terminal_linux.go generated vendored Normal file
@ -0,0 +1,14 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios

@ -0,0 +1,28 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
var termios Termios
switch v := f.(type) {
case *os.File:
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
default:
return false
}
}

21
vendor/github.com/Sirupsen/logrus/terminal_solaris.go generated vendored Normal file
@ -0,0 +1,21 @@
// +build solaris,!appengine
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}

82
vendor/github.com/Sirupsen/logrus/terminal_windows.go generated vendored Normal file
@ -0,0 +1,82 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows,!appengine
package logrus
import (
"bytes"
"errors"
"io"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
)
const (
enableProcessedOutput = 0x0001
enableWrapAtEolOutput = 0x0002
enableVirtualTerminalProcessing = 0x0004
)
func getVersion() (float64, error) {
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
cmd := exec.Command("cmd", "ver")
cmd.Stdout = stdout
cmd.Stderr = stderr
err := cmd.Run()
if err != nil {
return -1, err
}
// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
version := strings.Replace(stdout.String(), "\n", "", -1)
version = strings.Replace(version, "\r\n", "", -1)
x1 := strings.Index(version, "[Version")
if x1 == -1 || strings.Index(version, "]") == -1 {
return -1, errors.New("Can't determine Windows version")
}
return strconv.ParseFloat(version[x1+9:x1+13], 64)
}
func init() {
ver, err := getVersion()
if err != nil {
return
}
// Activate Virtual Processing for Windows CMD
// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
if ver >= 10 {
handle := syscall.Handle(os.Stderr.Fd())
procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
}
}
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}

189
vendor/github.com/Sirupsen/logrus/text_formatter.go generated vendored Normal file
@ -0,0 +1,189 @@
package logrus
import (
"bytes"
"fmt"
"sort"
"strings"
"sync"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
)
func init() {
baseTimestamp = time.Now()
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. Useful when output is redirected to a logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// QuoteCharacter can be set to override the default quoting character "
// with something else. For example: ', or `.
QuoteCharacter string
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if len(f.QuoteCharacter) == 0 {
f.QuoteCharacter = "\""
}
if entry.Logger != nil {
f.isTerminal = IsTerminal(entry.Logger.Out)
}
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !f.needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
errmsg := value.Error()
if !f.needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
fmt.Fprint(b, value)
}
}
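A minimal sketch of configuring this `TextFormatter`, using only the options defined above; the chosen values are illustrative:
```go
package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.TextFormatter{
		// Print the full timestamp instead of seconds since program start.
		FullTimestamp:   true,
		TimestampFormat: "2006-01-02 15:04:05",
		// Quote empty values and use a backtick as the quote character.
		QuoteEmptyFields: true,
		QuoteCharacter:   "`",
	}
	log.WithFields(logrus.Fields{"path": "/foo bar", "note": ""}).Warn("values needing quotes get wrapped")
}
```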

@ -0,0 +1,87 @@
package logrus
import (
"bytes"
"errors"
"strings"
"testing"
"time"
)
func TestQuoting(t *testing.T) {
tf := &TextFormatter{DisableColors: true}
checkQuoting := func(q bool, value interface{}) {
b, _ := tf.Format(WithField("test", value))
idx := bytes.Index(b, ([]byte)("test="))
cont := bytes.Contains(b[idx+5:], []byte(tf.QuoteCharacter))
if cont != q {
if q {
t.Errorf("quoting expected for: %#v", value)
} else {
t.Errorf("quoting not expected for: %#v", value)
}
}
}
checkQuoting(false, "")
checkQuoting(false, "abcd")
checkQuoting(false, "v1.0")
checkQuoting(false, "1234567890")
checkQuoting(true, "/foobar")
checkQuoting(true, "x y")
checkQuoting(true, "x,y")
checkQuoting(false, errors.New("invalid"))
checkQuoting(true, errors.New("invalid argument"))
// Test for custom quote character.
tf.QuoteCharacter = "`"
checkQuoting(false, "")
checkQuoting(false, "abcd")
checkQuoting(true, "/foobar")
checkQuoting(true, errors.New("invalid argument"))
// Test for multi-character quotes.
tf.QuoteCharacter = "§~±"
checkQuoting(false, "abcd")
checkQuoting(true, errors.New("invalid argument"))
// Test for quoting empty fields.
tf.QuoteEmptyFields = true
checkQuoting(true, "")
checkQuoting(false, "abcd")
checkQuoting(true, errors.New("invalid argument"))
}
func TestTimestampFormat(t *testing.T) {
checkTimeStr := func(format string) {
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
customStr, _ := customFormatter.Format(WithField("test", "test"))
timeStart := bytes.Index(customStr, ([]byte)("time="))
timeEnd := bytes.Index(customStr, ([]byte)("level="))
timeStr := customStr[timeStart+5+len(customFormatter.QuoteCharacter) : timeEnd-1-len(customFormatter.QuoteCharacter)]
if format == "" {
format = time.RFC3339
}
_, e := time.Parse(format, (string)(timeStr))
if e != nil {
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
}
}
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
checkTimeStr("Mon Jan _2 15:04:05 2006")
checkTimeStr("")
}
func TestDisableTimestampWithColoredOutput(t *testing.T) {
tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
b, _ := tf.Format(WithField("test", "test"))
if strings.Contains(string(b), "[0000]") {
t.Error("timestamp not expected when DisableTimestamp is true")
}
}
// TODO add tests for sorting etc., this requires a parser for the text
// formatter output.

62
vendor/github.com/Sirupsen/logrus/writer.go generated vendored Normal file
@ -0,0 +1,62 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = entry.Debug
case InfoLevel:
printFunc = entry.Info
case WarnLevel:
printFunc = entry.Warn
case ErrorLevel:
printFunc = entry.Error
case FatalLevel:
printFunc = entry.Fatal
case PanicLevel:
printFunc = entry.Panic
default:
printFunc = entry.Print
}
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
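For context, a common pattern this enables: routing a standard-library `*log.Logger` (here an `http.Server` error log, purely as an illustration) through logrus at a chosen level via `WriterLevel`:
```go
package main

import (
	"log"
	"net/http"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Each line written to w becomes a WARN-level logrus entry,
	// delivered through the pipe and scanner shown above.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: log.New(w, "", 0),
	}
	logger.Fatal(srv.ListenAndServe())
}
```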

28
vendor/github.com/pengsrc/go-shared/.gitignore generated vendored Normal file
@ -0,0 +1,28 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
vendor
coverage

35
vendor/github.com/pengsrc/go-shared/.travis.yml generated vendored Normal file
@ -0,0 +1,35 @@
language: go
go:
- 1.8
- 1.7
- 1.6
cache:
directories:
- ${HOME}/source
before_install:
- pushd ${HOME}/source
- if [[ ! -d "./make-4.0" ]]; then
wget http://ftp.gnu.org/gnu/make/make-4.0.tar.gz &&
tar -vxzf make-4.0.tar.gz &&
pushd make-4.0 && ./configure --prefix=/usr/local && make && popd;
fi
- pushd make-4.0 && sudo make install && popd
- if [[ ! -d "./glide-v0.12.3" ]]; then
wget https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-linux-amd64.tar.gz &&
tar -vxzf glide-v0.12.3-linux-amd64.tar.gz &&
mv linux-amd64 glide-v0.12.3;
fi
- pushd glide-v0.12.3 && sudo cp glide /usr/local/bin && popd
- popd
- /usr/local/bin/make --version
- /usr/local/bin/glide --version
install:
- go get -u github.com/golang/lint/golint
- glide install
script:
- /usr/local/bin/make check
- /usr/local/bin/make test-coverage

202
vendor/github.com/pengsrc/go-shared/LICENSE generated vendored Normal file
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 Jingwen Peng
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

61
vendor/github.com/pengsrc/go-shared/Makefile generated vendored Normal file
@ -0,0 +1,61 @@
SHELL := /bin/bash
PACKAGE_NAME="github.com/pengsrc/go-shared"
DIRS_TO_CHECK=$(shell ls -d */ | grep -v "vendor")
PKGS_TO_CHECK=$(shell go list ./... | grep -vE "/vendor")
ifneq (${PKG},)
DIRS_TO_CHECK="./${PKG}"
PKGS_TO_CHECK="${PACKAGE_NAME}/${PKG}"
endif
.PHONY: help
help:
@echo "Please use \`make <target>\` where <target> is one of"
@echo " check to vet and lint"
@echo " test to run test"
@echo " test-coverage to run test with coverage"
.PHONY: check
check: format vet lint
.PHONY: format
format:
@gofmt -w .
@echo "ok"
.PHONY: vet
vet:
@echo "go tool vet, skipping vendor packages"
@go tool vet -all ${DIRS_TO_CHECK}
@echo "ok"
.PHONY: lint
lint:
@echo "golint, skipping vendor packages"
@lint=$$(for pkg in ${PKGS_TO_CHECK}; do golint $${pkg}; done); \
lint=$$(echo "$${lint}"); \
if [[ -n $${lint} ]]; then echo "$${lint}"; exit 1; fi
@echo "ok"
.PHONY: update
.PHONY: test
test:
@echo "run test"
@go test -v ${PKGS_TO_CHECK}
@echo "ok"
.PHONY: test-coverage
test-coverage:
@echo "run test with coverage"
@for pkg in ${PKGS_TO_CHECK}; do \
output="coverage$${pkg#${PACKAGE_NAME}}"; \
mkdir -p $${output}; \
go test -v -cover -coverprofile="$${output}/profile.out" $${pkg}; \
if [[ -e "$${output}/profile.out" ]]; then \
go tool cover -html="$${output}/profile.out" \
-o "$${output}/profile.html"; \
fi; \
done
@echo "ok"

19
vendor/github.com/pengsrc/go-shared/README.md generated vendored Normal file
@ -0,0 +1,19 @@
# go-shared
[![Build Status](https://travis-ci.org/pengsrc/go-shared.svg?branch=master)](https://travis-ci.org/pengsrc/go-shared)
[![Go Report Card](https://goreportcard.com/badge/github.com/pengsrc/go-shared)](https://goreportcard.com/report/github.com/pengsrc/go-shared)
[![License](http://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/yunify/qingstor-sdk-go/blob/master/LICENSE)
Useful packages for the Go programming language.
## Contributing
1. Fork it ( https://github.com/pengsrc/go-shared/fork )
2. Create your feature branch (`git checkout -b new-feature`)
3. Commit your changes (`git commit -asm 'Add some feature'`)
4. Push to the branch (`git push origin new-feature`)
5. Create a new Pull Request
## LICENSE
The Apache License (Version 2.0, January 2004).

21
vendor/github.com/pengsrc/go-shared/check/error.go generated vendored Normal file
@ -0,0 +1,21 @@
package check
import (
"fmt"
"os"
)
// ErrorForExit check the error.
// If error is not nil, print the error message and exit the application.
// If error is nil, do nothing.
func ErrorForExit(name string, err error, code ...int) {
if err != nil {
exitCode := 1
if len(code) > 0 {
exitCode = code[0]
}
fmt.Fprintf(os.Stderr, "%s: %s (%d)\n", name, err.Error(), exitCode)
fmt.Fprintf(os.Stderr, "See \"%s --help\".\n", name)
os.Exit(exitCode)
}
}
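A small, hypothetical sketch of calling `ErrorForExit` from a CLI entry point (the tool name, file name, and exit code are made up for illustration):
```go
package main

import (
	"os"

	"github.com/pengsrc/go-shared/check"
)

func main() {
	f, err := os.Open("config.yaml")
	// On error this prints "mytool: <error> (2)" plus a "--help" hint
	// to stderr and exits with status 2; on nil it does nothing.
	check.ErrorForExit("mytool", err, 2)
	defer f.Close()
	// ... use f ...
}
```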

@ -0,0 +1,12 @@
package check
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCheckErrorForExit(t *testing.T) {
ErrorForExit("name", nil)
assert.True(t, true)
}

11
vendor/github.com/pengsrc/go-shared/check/host.go generated vendored Normal file
@ -0,0 +1,11 @@
package check
import (
"regexp"
)
// HostAndPort checks whether a string contains host and port.
// It returns true if matched.
func HostAndPort(hostAndPort string) bool {
return regexp.MustCompile(`^[^:]+:[0-9]+$`).MatchString(hostAndPort)
}

16
vendor/github.com/pengsrc/go-shared/check/host_test.go generated vendored Normal file
@ -0,0 +1,16 @@
package check
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCheckHostAndPort(t *testing.T) {
assert.False(t, HostAndPort("127.0.0.1:80:90"))
assert.False(t, HostAndPort("127.0.0.1"))
assert.False(t, HostAndPort("mysql"))
assert.False(t, HostAndPort("mysql:mysql"))
assert.True(t, HostAndPort("mysql:3306"))
assert.True(t, HostAndPort("172.16.70.50:6379"))
}

41
vendor/github.com/pengsrc/go-shared/check/slice.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package check
// StringSliceContains iterates over the slice to find the target.
func StringSliceContains(slice []string, target string) bool {
for _, v := range slice {
if v == target {
return true
}
}
return false
}
// IntSliceContains iterates over the slice to find the target.
func IntSliceContains(slice []int, target int) bool {
for _, v := range slice {
if v == target {
return true
}
}
return false
}
// Int32SliceContains iterates over the slice to find the target.
func Int32SliceContains(slice []int32, target int32) bool {
for _, v := range slice {
if v == target {
return true
}
}
return false
}
// Int64SliceContains iterates over the slice to find the target.
func Int64SliceContains(slice []int64, target int64) bool {
for _, v := range slice {
if v == target {
return true
}
}
return false
}

View File

@ -0,0 +1,27 @@
package check
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestStringSliceContains(t *testing.T) {
assert.True(t, StringSliceContains([]string{"1", "2", "3"}, "2"))
assert.False(t, StringSliceContains([]string{"1", "2", "3"}, "4"))
}
func TestIntSliceContains(t *testing.T) {
assert.True(t, IntSliceContains([]int{1, 2, 3, 4, 5, 6}, 4))
assert.False(t, IntSliceContains([]int{1, 2, 3, 4, 5, 6}, 7))
}
func TestInt32SliceContains(t *testing.T) {
assert.True(t, Int32SliceContains([]int32{1, 2, 3, 4, 5, 6}, 4))
assert.False(t, Int32SliceContains([]int32{1, 2, 3, 4, 5, 6}, 7))
}
func TestInt64SliceContains(t *testing.T) {
assert.True(t, Int64SliceContains([]int64{1, 2, 3, 4, 5, 6}, 4))
assert.False(t, Int64SliceContains([]int64{1, 2, 3, 4, 5, 6}, 7))
}

56
vendor/github.com/pengsrc/go-shared/convert/time.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
package convert
import (
"fmt"
"time"
"github.com/pengsrc/go-shared/check"
)
// Supported time layouts
const (
RFC822 = "Mon, 02 Jan 2006 15:04:05 GMT"
ISO8601 = "2006-01-02T15:04:05Z"
ISO8601Milli = "2006-01-02T15:04:05.000Z"
NGINXTime = "02/Jan/2006:15:04:05 -0700"
)
// TimeToString transforms given time to string.
func TimeToString(timeValue time.Time, format string) string {
if check.StringSliceContains([]string{RFC822, ISO8601, ISO8601Milli}, format) {
timeValue = timeValue.UTC()
}
return timeValue.Format(format)
}
// StringToTime transforms given string to time.
func StringToTime(timeString string, format string) (time.Time, error) {
result, err := time.Parse(format, timeString)
if timeString != "0001-01-01T00:00:00Z" {
zero := time.Time{}
if result == zero {
err = fmt.Errorf(`failed to parse "%s" like "%s"`, timeString, format)
}
}
return result, err
}
// TimeToTimestamp transforms given time to unix time int.
func TimeToTimestamp(t time.Time) int64 {
return t.Unix()
}
// TimestampToTime transforms given unix time int64 to time in UTC.
func TimestampToTime(unix int64) time.Time {
return time.Unix(unix, 0).UTC()
}
// StringToUnixTimestamp transforms the given string to a unix timestamp int64. It
// returns -1 when the time string fails to parse.
func StringToUnixTimestamp(timeString string, format string) int64 {
t, err := StringToTime(timeString, format)
if err != nil {
return -1
}
return t.Unix()
}

View File

@ -0,0 +1,78 @@
package convert
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestTimeToString(t *testing.T) {
tz, err := time.LoadLocation("Asia/Shanghai")
assert.NoError(t, err)
someTime := time.Date(2016, 9, 1, 15, 30, 0, 500000000, tz)
assert.Equal(t, "Thu, 01 Sep 2016 07:30:00 GMT", TimeToString(someTime, RFC822))
assert.Equal(t, "2016-09-01T07:30:00Z", TimeToString(someTime, ISO8601))
assert.Equal(t, "2016-09-01T07:30:00.500Z", TimeToString(someTime, ISO8601Milli))
assert.Equal(t, "01/Sep/2016:15:30:00 +0800", TimeToString(someTime, NGINXTime))
assert.Equal(t, "01/Sep/2016:07:30:00 +0000", TimeToString(someTime.UTC(), NGINXTime))
}
func TestStringToTime(t *testing.T) {
tz, err := time.LoadLocation("Asia/Shanghai")
assert.NoError(t, err)
someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz)
parsedTime, err := StringToTime("Thu, 01 Sep 2016 07:30:00 GMT", RFC822)
assert.NoError(t, err)
assert.Equal(t, someTime.UTC(), parsedTime)
parsedTime, err = StringToTime("2016-09-01T07:30:00Z", ISO8601)
assert.NoError(t, err)
assert.Equal(t, someTime.UTC(), parsedTime)
parsedTime, err = StringToTime("1472715000", ISO8601)
assert.Error(t, err)
assert.Equal(t, time.Time{}, parsedTime)
someTime = time.Date(2016, 9, 1, 15, 30, 0, 500000000, tz)
parsedTime, err = StringToTime("2016-09-01T07:30:00.500Z", ISO8601Milli)
assert.NoError(t, err)
assert.Equal(t, someTime.UTC(), parsedTime)
someTime = time.Date(2016, 9, 1, 15, 30, 0, 0, tz)
parsedTime, err = StringToTime("01/Sep/2016:15:30:00 +0800", NGINXTime)
assert.NoError(t, err)
assert.Equal(t, someTime.UTC(), parsedTime.UTC())
parsedTime, err = StringToTime("01/Sep/2016:07:30:00 +0000", NGINXTime)
assert.NoError(t, err)
assert.Equal(t, someTime.UTC(), parsedTime.UTC())
}
func TestStringToUnixString(t *testing.T) {
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("Thu, 01 Sep 2016 07:30:00 GMT", RFC822))
assert.Equal(t, int64(-1), StringToUnixTimestamp("2016-09-01T07:30:00.000Z", RFC822))
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00Z", ISO8601))
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00.000Z", ISO8601Milli))
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("2016-09-01T07:30:00.500Z", ISO8601Milli))
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("01/Sep/2016:15:30:00 +0800", NGINXTime))
assert.Equal(t, int64(1472715000), StringToUnixTimestamp("01/Sep/2016:07:30:00 +0000", NGINXTime))
}
func TestTimeToUnixInt(t *testing.T) {
tz, err := time.LoadLocation("Asia/Shanghai")
assert.NoError(t, err)
someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz)
assert.Equal(t, int64(1472715000), TimeToTimestamp(someTime))
}
func TestUnixIntToTime(t *testing.T) {
tz, err := time.LoadLocation("Asia/Shanghai")
assert.NoError(t, err)
someTime := time.Date(2016, 9, 1, 15, 30, 0, 0, tz)
assert.Equal(t, someTime.UTC(), TimestampToTime(1472715000))
}

477
vendor/github.com/pengsrc/go-shared/convert/types.go generated vendored Normal file
View File

@ -0,0 +1,477 @@
package convert
import (
"time"
)
// String returns a pointer to the given string value.
func String(v string) *string {
return &v
}
// StringValue returns the value of the given string pointer or
// "" if the pointer is nil.
func StringValue(v *string) string {
if v != nil {
return *v
}
return ""
}
// StringSlice converts a slice of string values into a slice of
// string pointers
func StringSlice(src []string) []*string {
dst := make([]*string, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// StringValueSlice converts a slice of string pointers into a slice of
// string values
func StringValueSlice(src []*string) []string {
dst := make([]string, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// StringMap converts a string map of string values into a string
// map of string pointers
func StringMap(src map[string]string) map[string]*string {
dst := make(map[string]*string)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// StringValueMap converts a string map of string pointers into a string
// map of string values
func StringValueMap(src map[string]*string) map[string]string {
dst := make(map[string]string)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Bool returns a pointer to the given bool value.
func Bool(v bool) *bool {
return &v
}
// BoolValue returns the value of the given bool pointer or
// false if the pointer is nil.
func BoolValue(v *bool) bool {
if v != nil {
return *v
}
return false
}
// BoolSlice converts a slice of bool values into a slice of
// bool pointers
func BoolSlice(src []bool) []*bool {
dst := make([]*bool, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// BoolValueSlice converts a slice of bool pointers into a slice of
// bool values
func BoolValueSlice(src []*bool) []bool {
dst := make([]bool, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// BoolMap converts a string map of bool values into a string
// map of bool pointers
func BoolMap(src map[string]bool) map[string]*bool {
dst := make(map[string]*bool)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// BoolValueMap converts a string map of bool pointers into a string
// map of bool values
func BoolValueMap(src map[string]*bool) map[string]bool {
dst := make(map[string]bool)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int returns a pointer to the given int value.
func Int(v int) *int {
return &v
}
// IntValue returns the value of the given int pointer or
// 0 if the pointer is nil.
func IntValue(v *int) int {
if v != nil {
return *v
}
return 0
}
// IntSlice converts a slice of int values into a slice of
// int pointers
func IntSlice(src []int) []*int {
dst := make([]*int, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// IntValueSlice converts a slice of int pointers into a slice of
// int values
func IntValueSlice(src []*int) []int {
dst := make([]int, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// IntMap converts a string map of int values into a string
// map of int pointers
func IntMap(src map[string]int) map[string]*int {
dst := make(map[string]*int)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// IntValueMap converts a string map of int pointers into a string
// map of int values
func IntValueMap(src map[string]*int) map[string]int {
dst := make(map[string]int)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int32 returns a pointer to the given int32 value.
func Int32(v int32) *int32 {
return &v
}
// Int32Value returns the value of the given int32 pointer or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
if v != nil {
return *v
}
return 0
}
// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
dst := make([]*int32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
dst := make([]int32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
dst := make(map[string]*int32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
dst := make(map[string]int32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the given int64 value.
func Int64(v int64) *int64 {
return &v
}
// Int64Value returns the value of the given int64 pointer or
// 0 if the pointer is nil.
func Int64Value(v *int64) int64 {
if v != nil {
return *v
}
return 0
}
// Int64Slice converts a slice of int64 values into a slice of
// int64 pointers
func Int64Slice(src []int64) []*int64 {
dst := make([]*int64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int64ValueSlice converts a slice of int64 pointers into a slice of
// int64 values
func Int64ValueSlice(src []*int64) []int64 {
dst := make([]int64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int64Map converts a string map of int64 values into a string
// map of int64 pointers
func Int64Map(src map[string]int64) map[string]*int64 {
dst := make(map[string]*int64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int64ValueMap converts a string map of int64 pointers into a string
// map of int64 values
func Int64ValueMap(src map[string]*int64) map[string]int64 {
dst := make(map[string]int64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float32 returns a pointer to the given float32 value.
func Float32(v float32) *float32 {
return &v
}
// Float32Value returns the value of the given float32 pointer or
// 0 if the pointer is nil.
func Float32Value(v *float32) float32 {
if v != nil {
return *v
}
return 0
}
// Float32Slice converts a slice of float32 values into a slice of
// float32 pointers
func Float32Slice(src []float32) []*float32 {
dst := make([]*float32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values
func Float32ValueSlice(src []*float32) []float32 {
dst := make([]float32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers
func Float32Map(src map[string]float32) map[string]*float32 {
dst := make(map[string]*float32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values
func Float32ValueMap(src map[string]*float32) map[string]float32 {
dst := make(map[string]float32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the given float64 value.
func Float64(v float64) *float64 {
return &v
}
// Float64Value returns the value of the given float64 pointer or
// 0 if the pointer is nil.
func Float64Value(v *float64) float64 {
if v != nil {
return *v
}
return 0
}
// Float64Slice converts a slice of float64 values into a slice of
// float64 pointers
func Float64Slice(src []float64) []*float64 {
dst := make([]*float64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values
func Float64ValueSlice(src []*float64) []float64 {
dst := make([]float64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float64Map converts a string map of float64 values into a string
// map of float64 pointers
func Float64Map(src map[string]float64) map[string]*float64 {
dst := make(map[string]*float64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values
func Float64ValueMap(src map[string]*float64) map[string]float64 {
dst := make(map[string]float64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Time returns a pointer to the given time.Time value.
func Time(v time.Time) *time.Time {
return &v
}
// TimeValue returns the value of the given time.Time pointer or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
if v != nil {
return *v
}
return time.Time{}
}
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {
dst := make([]*time.Time, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values
func TimeValueSlice(src []*time.Time) []time.Time {
dst := make([]time.Time, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// TimeMap converts a string map of time.Time values into a string
// map of time.Time pointers
func TimeMap(src map[string]time.Time) map[string]*time.Time {
dst := make(map[string]*time.Time)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// TimeValueMap converts a string map of time.Time pointers into a string
// map of time.Time values
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
dst := make(map[string]time.Time)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
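Not part of the vendored file: a brief sketch of why these pointer helpers exist, in the same spirit as the AWS SDK conversion functions. The params struct is purely illustrative.
package main

import (
	"fmt"

	"github.com/pengsrc/go-shared/convert"
)

// params models an API input where "unset" must be distinguishable from the zero value.
type params struct {
	Name  *string
	Limit *int
}

func main() {
	p := params{Name: convert.String("bucket"), Limit: convert.Int(10)}
	// StringValue and IntValue dereference safely, returning "" or 0 for nil pointers.
	fmt.Println(convert.StringValue(p.Name), convert.IntValue(p.Limit))
	fmt.Println(convert.IntValue(nil)) // 0
}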

View File

@ -0,0 +1,635 @@
package convert
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestStringValue(t *testing.T) {
s := "string"
assert.Equal(t, s, StringValue(String(s)))
assert.Equal(t, "", StringValue(nil))
}
var testCasesStringSlice = [][]string{
{"a", "b", "c", "d", "e"},
{"a", "b", "", "", "e"},
}
func TestStringSlice(t *testing.T) {
for idx, in := range testCasesStringSlice {
if in == nil {
continue
}
out := StringSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := StringValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesStringValueSlice = [][]*string{
{String("a"), String("b"), nil, String("c")},
}
func TestStringValueSlice(t *testing.T) {
for idx, in := range testCasesStringValueSlice {
if in == nil {
continue
}
out := StringValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := StringSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesStringMap = []map[string]string{
{"a": "1", "b": "2", "c": "3"},
}
func TestStringMap(t *testing.T) {
for idx, in := range testCasesStringMap {
if in == nil {
continue
}
out := StringMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := StringValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestBoolValue(t *testing.T) {
b := true
assert.Equal(t, b, BoolValue(Bool(b)))
assert.Equal(t, false, BoolValue(nil))
}
var testCasesBoolSlice = [][]bool{
{true, true, false, false},
}
func TestBoolSlice(t *testing.T) {
for idx, in := range testCasesBoolSlice {
if in == nil {
continue
}
out := BoolSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := BoolValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesBoolValueSlice = [][]*bool{}
func TestBoolValueSlice(t *testing.T) {
for idx, in := range testCasesBoolValueSlice {
if in == nil {
continue
}
out := BoolValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := BoolSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesBoolMap = []map[string]bool{
{"a": true, "b": false, "c": true},
}
func TestBoolMap(t *testing.T) {
for idx, in := range testCasesBoolMap {
if in == nil {
continue
}
out := BoolMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := BoolValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestIntValue(t *testing.T) {
i := 1024
assert.Equal(t, i, IntValue(Int(i)))
assert.Equal(t, 0, IntValue(nil))
}
var testCasesIntSlice = [][]int{
{1, 2, 3, 4},
}
func TestIntSlice(t *testing.T) {
for idx, in := range testCasesIntSlice {
if in == nil {
continue
}
out := IntSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := IntValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesIntValueSlice = [][]*int{}
func TestIntValueSlice(t *testing.T) {
for idx, in := range testCasesIntValueSlice {
if in == nil {
continue
}
out := IntValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := IntSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesIntMap = []map[string]int{
{"a": 3, "b": 2, "c": 1},
}
func TestIntMap(t *testing.T) {
for idx, in := range testCasesIntMap {
if in == nil {
continue
}
out := IntMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := IntValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestInt64Value(t *testing.T) {
i := int64(1024)
assert.Equal(t, i, Int64Value(Int64(i)))
assert.Equal(t, int64(0), Int64Value(nil))
}
var testCasesInt64Slice = [][]int64{
{1, 2, 3, 4},
}
func TestInt64Slice(t *testing.T) {
for idx, in := range testCasesInt64Slice {
if in == nil {
continue
}
out := Int64Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int64ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesInt64ValueSlice = [][]*int64{}
func TestInt64ValueSlice(t *testing.T) {
for idx, in := range testCasesInt64ValueSlice {
if in == nil {
continue
}
out := Int64ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Int64Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesInt64Map = []map[string]int64{
{"a": 3, "b": 2, "c": 1},
}
func TestInt64Map(t *testing.T) {
for idx, in := range testCasesInt64Map {
if in == nil {
continue
}
out := Int64Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int64ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestInt32Value(t *testing.T) {
i := int32(1024)
assert.Equal(t, i, Int32Value(Int32(i)))
assert.Equal(t, int32(0), Int32Value(nil))
}
var testCasesInt32Slice = [][]int32{
{1, 2, 3, 4},
}
func TestInt32Slice(t *testing.T) {
for idx, in := range testCasesInt32Slice {
if in == nil {
continue
}
out := Int32Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int32ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesInt32ValueSlice = [][]*int32{}
func TestInt32ValueSlice(t *testing.T) {
for idx, in := range testCasesInt32ValueSlice {
if in == nil {
continue
}
out := Int32ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Int32Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesInt32Map = []map[string]int32{
{"a": 3, "b": 2, "c": 1},
}
func TestInt32Map(t *testing.T) {
for idx, in := range testCasesInt32Map {
if in == nil {
continue
}
out := Int32Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Int32ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestFloat64Value(t *testing.T) {
i := float64(1024)
assert.Equal(t, i, Float64Value(Float64(i)))
assert.Equal(t, float64(0), Float64Value(nil))
}
var testCasesFloat64Slice = [][]float64{
{1, 2, 3, 4},
}
func TestFloat64Slice(t *testing.T) {
for idx, in := range testCasesFloat64Slice {
if in == nil {
continue
}
out := Float64Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float64ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesFloat64ValueSlice = [][]*float64{}
func TestFloat64ValueSlice(t *testing.T) {
for idx, in := range testCasesFloat64ValueSlice {
if in == nil {
continue
}
out := Float64ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Float64Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesFloat64Map = []map[string]float64{
{"a": 3, "b": 2, "c": 1},
}
func TestFloat64Map(t *testing.T) {
for idx, in := range testCasesFloat64Map {
if in == nil {
continue
}
out := Float64Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float64ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestFloat32Value(t *testing.T) {
i := float32(1024)
assert.Equal(t, i, Float32Value(Float32(i)))
assert.Equal(t, float32(0), Float32Value(nil))
}
var testCasesFloat32Slice = [][]float32{
{1, 2, 3, 4},
}
func TestFloat32Slice(t *testing.T) {
for idx, in := range testCasesFloat32Slice {
if in == nil {
continue
}
out := Float32Slice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float32ValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesFloat32ValueSlice = [][]*float32{}
func TestFloat32ValueSlice(t *testing.T) {
for idx, in := range testCasesFloat32ValueSlice {
if in == nil {
continue
}
out := Float32ValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := Float32Slice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesFloat32Map = []map[string]float32{
{"a": 3, "b": 2, "c": 1},
}
func TestFloat32Map(t *testing.T) {
for idx, in := range testCasesFloat32Map {
if in == nil {
continue
}
out := Float32Map(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := Float32ValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
func TestTimeValue(t *testing.T) {
tm := time.Time{}
assert.Equal(t, tm, TimeValue(Time(tm)))
assert.Equal(t, time.Time{}, TimeValue(nil))
}
var testCasesTimeSlice = [][]time.Time{
{time.Now(), time.Now().AddDate(100, 0, 0)},
}
func TestTimeSlice(t *testing.T) {
for idx, in := range testCasesTimeSlice {
if in == nil {
continue
}
out := TimeSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := TimeValueSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}
var testCasesTimeValueSlice = [][]*time.Time{}
func TestTimeValueSlice(t *testing.T) {
for idx, in := range testCasesTimeValueSlice {
if in == nil {
continue
}
out := TimeValueSlice(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
if in[i] == nil {
assert.Empty(t, out[i], "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx)
}
}
out2 := TimeSlice(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
for i := range out2 {
if in[i] == nil {
assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx)
} else {
assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx)
}
}
}
}
var testCasesTimeMap = []map[string]time.Time{
{"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()},
}
func TestTimeMap(t *testing.T) {
for idx, in := range testCasesTimeMap {
if in == nil {
continue
}
out := TimeMap(in)
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx)
for i := range out {
assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx)
}
out2 := TimeValueMap(out)
assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx)
assert.Equal(t, in, out2, "Unexpected value at idx %d", idx)
}
}

28
vendor/github.com/pengsrc/go-shared/glide.lock generated vendored Normal file
View File

@ -0,0 +1,28 @@
hash: d9fb8784d4c53a209ba12b5be1dc51d43b925ff53a06b5df850a9f7afa5e3e08
updated: 2017-04-11T15:44:30.513321945+08:00
imports:
- name: github.com/davecgh/go-spew
version: 346938d642f2ec3594ed81d874461961cd0faa76
subpackages:
- spew
- name: github.com/Jeffail/gabs
version: 2a3aa15961d5fee6047b8151b67ac2f08ba2c48c
- name: github.com/pmezard/go-difflib
version: 792786c7400a136282c1664665ae0a8db921c6c2
subpackages:
- difflib
- name: github.com/Sirupsen/logrus
version: d26492970760ca5d33129d2d799e34be5c4782eb
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert
- name: golang.org/x/sys
version: f3918c30c5c2cb527c0b071a27c35120a6c0719a
repo: https://github.com/golang/sys.git
subpackages:
- unix
- name: gopkg.in/yaml.v2
version: a5b47d31c556af34a302ce5d659e6fea44d90de0
repo: https://github.com/go-yaml/yaml.git
testImports: []

23
vendor/github.com/pengsrc/go-shared/glide.yaml generated vendored Normal file
View File

@ -0,0 +1,23 @@
package: github.com/pengsrc/go-shared
import:
# Test
- package: github.com/stretchr/testify
version: v1.1.4
- package: github.com/davecgh/go-spew
version: v1.1.0
- package: github.com/pmezard/go-difflib
version: v1.0.0
# JSON
- package: github.com/Jeffail/gabs
version: 1.0
# YAML
- package: gopkg.in/yaml.v2
version: a5b47d31c556af34a302ce5d659e6fea44d90de0
repo: https://github.com/go-yaml/yaml.git
# Logging
- package: github.com/Sirupsen/logrus
version: v0.11.0
# GoLang
- package: golang.org/x/sys
version: f3918c30c5c2cb527c0b071a27c35120a6c0719a
repo: https://github.com/golang/sys.git

50
vendor/github.com/pengsrc/go-shared/json/json.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package json
import (
"bytes"
"encoding/json"
)
// Encode encodes the given interface to a JSON byte slice.
func Encode(source interface{}, unescape bool) ([]byte, error) {
bytesResult, err := json.Marshal(source)
if err != nil {
return []byte{}, err
}
if unescape {
bytesResult = bytes.Replace(bytesResult, []byte("\\u003c"), []byte("<"), -1)
bytesResult = bytes.Replace(bytesResult, []byte("\\u003e"), []byte(">"), -1)
bytesResult = bytes.Replace(bytesResult, []byte("\\u0026"), []byte("&"), -1)
}
return bytesResult, nil
}
// Decode decodes the given JSON byte slice into the corresponding struct.
func Decode(content []byte, destinations ...interface{}) (interface{}, error) {
var destination interface{}
var err error
if len(destinations) == 1 {
destination = destinations[0]
err = json.Unmarshal(content, destination)
} else {
err = json.Unmarshal(content, &destination)
}
if err != nil {
return nil, err
}
return destination, err
}
// FormatToReadable formats given json byte slice prettily.
func FormatToReadable(source []byte) ([]byte, error) {
var out bytes.Buffer
err := json.Indent(&out, source, "", " ") // Using 2 space indent
if err != nil {
return []byte{}, err
}
return out.Bytes(), nil
}
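Not part of the vendored file: a short sketch of Decode's two modes, using an illustrative struct and JSON document. With a destination pointer it fills the struct; without one it returns a generic value.
package main

import (
	"fmt"

	"github.com/pengsrc/go-shared/json"
)

type user struct {
	Name string `json:"name"`
}

func main() {
	raw := []byte(`{"name": "alice", "age": 30}`)

	// Known shape: pass a pointer and the fields are filled in place.
	u := user{}
	if _, err := json.Decode(raw, &u); err != nil {
		panic(err)
	}
	fmt.Println(u.Name) // alice

	// Unknown shape: omit the destination and a generic value comes back.
	anyData, err := json.Decode(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(anyData.(map[string]interface{})["age"]) // 30
}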

79
vendor/github.com/pengsrc/go-shared/json/json_test.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
package json
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
func TestJSONDecodeUnknown(t *testing.T) {
jsonString := `{
"key1" : "This is a string.",
"key2" : 10.50,
"key3": [null, {"nestedKey1": "Another string"}]
}`
anyData, err := Decode([]byte(jsonString))
assert.NoError(t, err)
data := anyData.(map[string]interface{})
assert.Equal(t, 10.50, data["key2"])
var anotherData interface{}
_, err = Decode([]byte(jsonString), &anotherData)
assert.NoError(t, err)
data = anyData.(map[string]interface{})
assert.Equal(t, 10.50, data["key2"])
_, err = Decode([]byte(`- - -`), &JSONMustError{})
assert.Error(t, err)
}
func TestJSONDecodeKnown(t *testing.T) {
type SampleJSON struct {
Name string `json:"name"`
Description string `json:"description"`
}
sampleJSONString := `{"name": "NAME"}`
sample := SampleJSON{Name: "NaMe", Description: "DeScRiPtIoN"}
anyDataPointer, err := Decode([]byte(sampleJSONString), &sample)
assert.NoError(t, err)
data := anyDataPointer.(*SampleJSON)
assert.Equal(t, "NAME", sample.Name)
assert.Equal(t, "DeScRiPtIoN", sample.Description)
assert.Equal(t, "NAME", (*data).Name)
assert.Equal(t, "DeScRiPtIoN", (*data).Description)
}
func TestJSONEncode(t *testing.T) {
type SampleJSON struct {
Name string `json:"name"`
Description string `json:"description"`
}
sample := SampleJSON{Name: "NaMe", Description: "DeScRiPtIoN"}
jsonBytes, err := Encode(sample, true)
assert.NoError(t, err)
assert.Equal(t, `{"name":"NaMe","description":"DeScRiPtIoN"}`, string(jsonBytes))
_, err = Encode(&JSONMustError{}, true)
assert.Error(t, err)
}
func TestJSONFormatToReadable(t *testing.T) {
sampleJSONString := `{"name": "NAME"}`
jsonBytes, err := FormatToReadable([]byte(sampleJSONString))
assert.NoError(t, err)
assert.Equal(t, "{\n \"name\": \"NAME\"\n}", string(jsonBytes))
_, err = FormatToReadable([]byte(`XXXXX`))
assert.Error(t, err)
}
type JSONMustError struct{}
func (*JSONMustError) MarshalJSON() ([]byte, error) {
return []byte{}, errors.New("marshal error")
}

358
vendor/github.com/pengsrc/go-shared/logger/logger.go generated vendored Normal file
View File

@ -0,0 +1,358 @@
// Package logger provides support for logging to stdout and stderr.
package logger
import (
"errors"
"fmt"
"io"
"os"
"os/signal"
"path"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/pengsrc/go-shared/convert"
"github.com/pengsrc/go-shared/reopen"
)
// LogFormatter is used to format a log entry.
type LogFormatter struct{}
// Format formats a given log entry and returns a byte slice and an error.
func (c *LogFormatter) Format(entry *log.Entry) ([]byte, error) {
level := strings.ToUpper(entry.Level.String())
if level == "WARNING" {
level = "WARN"
}
if len(level) < 5 {
level = strings.Repeat(" ", 5-len(level)) + level
}
return []byte(
fmt.Sprintf(
"[%s #%d] %s -- : %s\n",
convert.TimeToString(time.Now(), convert.ISO8601Milli),
os.Getpid(),
level,
entry.Message,
),
), nil
}
// NewLogFormatter creates a new log formatter.
func NewLogFormatter() *LogFormatter {
return &LogFormatter{}
}
// ErrorHook presents error hook.
type ErrorHook struct {
levels []log.Level
out io.Writer
formatter log.Formatter
}
// Levels returns error log levels.
func (eh *ErrorHook) Levels() []log.Level {
return eh.levels
}
// Fire triggers before logging.
func (eh *ErrorHook) Fire(entry *log.Entry) error {
formatted, err := eh.formatter.Format(entry)
if err != nil {
return err
}
_, err = eh.out.Write(formatted)
if err != nil {
return err
}
return nil
}
// NewErrorHook creates a new error hook.
func NewErrorHook(out io.Writer) *ErrorHook {
return &ErrorHook{
levels: []log.Level{
log.WarnLevel,
log.ErrorLevel,
log.FatalLevel,
log.PanicLevel,
},
out: out,
formatter: NewLogFormatter(),
}
}
// Logger presents a logger.
type Logger struct {
origLogger *log.Logger
out io.Writer
errOut io.Writer
bufferedOut Flusher
bufferedErrOut Flusher
}
// Flusher defines an interface with a Flush() method.
type Flusher interface {
Flush()
}
// GetLevel gets the log level string.
func (l *Logger) GetLevel() string {
return l.origLogger.Level.String()
}
// SetLevel sets the log level. Valid levels are "debug", "info", "warn", "error", and "fatal".
func (l *Logger) SetLevel(level string) {
lvl, err := log.ParseLevel(level)
if err != nil {
l.Fatal(fmt.Sprintf(`log level not valid: "%s"`, level))
}
l.origLogger.Level = lvl
}
// Flush writes buffered logs.
func (l *Logger) Flush() {
if l.bufferedOut != nil {
l.bufferedOut.Flush()
}
if l.bufferedErrOut != nil {
l.bufferedErrOut.Flush()
}
}
// Debug logs a message with severity DEBUG.
func (l *Logger) Debug(message string) {
l.output(l.origLogger.Debug, message)
}
// Info logs a message with severity INFO.
func (l *Logger) Info(message string) {
l.output(l.origLogger.Info, message)
}
// Warn logs a message with severity WARN.
func (l *Logger) Warn(message string) {
l.output(l.origLogger.Warn, message)
}
// Error logs a message with severity ERROR.
func (l *Logger) Error(message string) {
l.output(l.origLogger.Error, message)
}
// Fatal logs a message with severity ERROR followed by a call to os.Exit().
func (l *Logger) Fatal(message string) {
l.output(l.origLogger.Fatal, message)
}
// Debugf logs a message with severity DEBUG in format.
func (l *Logger) Debugf(format string, v ...interface{}) {
l.output(l.origLogger.Debug, format, v...)
}
// Infof logs a message with severity INFO in format.
func (l *Logger) Infof(format string, v ...interface{}) {
l.output(l.origLogger.Info, format, v...)
}
// Warnf logs a message with severity WARN in format.
func (l *Logger) Warnf(format string, v ...interface{}) {
l.output(l.origLogger.Warn, format, v...)
}
// Errorf logs a message with severity ERROR in format.
func (l *Logger) Errorf(format string, v ...interface{}) {
l.output(l.origLogger.Error, format, v...)
}
// Fatalf logs a message with severity ERROR in format followed by a call to
// os.Exit().
func (l *Logger) Fatalf(format string, v ...interface{}) {
l.output(l.origLogger.Fatal, format, v...)
}
func (l *Logger) output(origin func(...interface{}), formatOrMessage string, v ...interface{}) {
if len(v) > 0 {
origin(fmt.Sprintf(formatOrMessage, v...))
} else {
origin(formatOrMessage)
}
}
// CheckLevel checks whether the log level is valid.
func CheckLevel(level string) error {
if _, err := log.ParseLevel(level); err != nil {
return fmt.Errorf(`log level not valid: "%s"`, level)
}
return nil
}
// NewFileLogger creates a logger that writes into a file.
func NewFileLogger(filePath string, level ...string) (*Logger, error) {
return NewFileLoggerWithErr(filePath, "", level...)
}
// NewFileLoggerWithErr creates a logger that writes into files.
func NewFileLoggerWithErr(filePath, errFilePath string, level ...string) (*Logger, error) {
if err := checkDir(path.Dir(filePath)); err != nil {
return nil, err
}
if errFilePath != "" {
if err := checkDir(path.Dir(errFilePath)); err != nil {
return nil, err
}
}
out, err := reopen.NewFileWriter(filePath)
if err != nil {
return nil, err
}
var errOut *reopen.FileWriter
if errFilePath != "" {
errOut, err = reopen.NewFileWriter(errFilePath)
if err != nil {
return nil, err
}
}
c := make(chan os.Signal)
go func() {
for {
select {
case <-c:
out.Reopen()
if errOut != nil {
errOut.Reopen()
}
}
}
}()
signal.Notify(c, syscall.SIGHUP)
if errOut == nil {
return NewLoggerWithErr(out, nil, level...)
}
return NewLoggerWithErr(out, errOut, level...)
}
// NewBufferedFileLogger creates a logger that writes into a file with a buffer.
func NewBufferedFileLogger(filePath string, level ...string) (*Logger, error) {
return NewBufferedFileLoggerWithErr(filePath, "", level...)
}
// NewBufferedFileLoggerWithErr creates a logger that writes into files with a buffer.
func NewBufferedFileLoggerWithErr(filePath, errFilePath string, level ...string) (*Logger, error) {
if err := checkDir(path.Dir(filePath)); err != nil {
return nil, err
}
if errFilePath != "" {
if err := checkDir(path.Dir(errFilePath)); err != nil {
return nil, err
}
}
out, err := reopen.NewFileWriter(filePath)
if err != nil {
return nil, err
}
var errOut *reopen.FileWriter
if errFilePath != "" {
errOut, err = reopen.NewFileWriter(errFilePath)
if err != nil {
return nil, err
}
}
bufferedOut := reopen.NewBufferedFileWriter(out)
var bufferedErrOut *reopen.BufferedFileWriter
if errOut != nil {
bufferedErrOut = reopen.NewBufferedFileWriter(errOut)
}
c := make(chan os.Signal)
go func() {
for {
select {
case <-c:
bufferedOut.Reopen()
if bufferedErrOut != nil {
bufferedErrOut.Reopen()
}
case <-time.After(10 * time.Second):
bufferedOut.Flush()
if bufferedErrOut != nil {
bufferedErrOut.Flush()
}
}
}
}()
signal.Notify(c, syscall.SIGHUP)
if bufferedErrOut == nil {
return NewLoggerWithErr(bufferedOut, nil, level...)
}
return NewLoggerWithErr(bufferedOut, bufferedErrOut, level...)
}
// NewTerminalLogger creates a logger that writes to the terminal.
func NewTerminalLogger(level ...string) (*Logger, error) {
return NewLogger(os.Stdout, level...)
}
// NewTerminalLoggerWithErr creates a logger that writes to the terminal, with warnings and errors also sent to stderr.
func NewTerminalLoggerWithErr(level ...string) (*Logger, error) {
return NewLoggerWithErr(os.Stdout, os.Stderr, level...)
}
// NewLogger creates a new logger for the given out and level; the level is
// optional.
func NewLogger(out io.Writer, level ...string) (*Logger, error) {
return NewLoggerWithErr(out, nil, level...)
}
// NewLoggerWithErr creates a new logger for the given out, err out and level; the
// err out can be nil, and the level is optional.
func NewLoggerWithErr(out, errOut io.Writer, level ...string) (*Logger, error) {
if out == nil {
return nil, errors.New(`must specify the output for logger`)
}
l := &Logger{
origLogger: &log.Logger{
Out: out,
Formatter: NewLogFormatter(),
Hooks: log.LevelHooks{},
Level: log.WarnLevel,
},
out: out,
errOut: errOut,
}
if errOut != nil {
l.origLogger.Hooks.Add(NewErrorHook(l.errOut))
}
if len(level) == 1 {
if err := CheckLevel(level[0]); err != nil {
return nil, err
}
l.SetLevel(level[0])
}
return l, nil
}
func checkDir(dir string) error {
if info, err := os.Stat(dir); err != nil {
return fmt.Errorf(`directory not exists: %s`, dir)
} else if !info.IsDir() {
return fmt.Errorf(`path is not directory: %s`, dir)
}
return nil
}
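Not part of the vendored file: a hedged usage sketch of the file logger, with placeholder log paths. Levels below the configured one are dropped, and when an error file is given, warnings and above are mirrored to it via the ErrorHook.
package main

import "github.com/pengsrc/go-shared/logger"

func main() {
	// All levels >= info go to app.log; warn and above are also mirrored to app.log.wf.
	l, err := logger.NewFileLoggerWithErr("/tmp/app.log", "/tmp/app.log.wf", "info")
	if err != nil {
		panic(err)
	}
	l.Infof("started with pid %d", 1234)
	l.Warn("disk almost full")
	// Flush is only meaningful for the buffered variants; it is a no-op here.
	l.Flush()
}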

View File

@ -0,0 +1,218 @@
package logger
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"syscall"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestCheckLevel(t *testing.T) {
assert.NoError(t, CheckLevel("warn"))
assert.Error(t, CheckLevel("invalid"))
}
func TestSetAndGetLevel(t *testing.T) {
l, err := NewTerminalLogger()
assert.NoError(t, err)
l.SetLevel("error")
assert.Equal(t, "error", l.GetLevel())
}
func TestNewFileLogger(t *testing.T) {
logFile := "/tmp/logger-test/test.log"
dir := path.Dir(logFile)
err := os.MkdirAll(dir, 0775)
assert.NoError(t, err)
defer os.RemoveAll(dir)
l, err := NewFileLogger(logFile, "debug")
assert.NoError(t, err)
l.Debug("file - debug")
l.Info("file - info")
l.Warn("file - warn")
l.Error("file - error")
log, err := ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 5, len(strings.Split(string(log), "\n")))
// Move log file.
movedLogFile := fmt.Sprintf(`%s.move`, logFile)
os.Rename(logFile, movedLogFile)
l.Error("file - error")
log, err = ioutil.ReadFile(movedLogFile)
assert.NoError(t, err)
assert.Equal(t, 6, len(strings.Split(string(log), "\n")))
// Reopen.
syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
time.Sleep(10 * time.Millisecond)
l.Warn("file - warn")
l.Error("file - error")
log, err = ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 3, len(strings.Split(string(log), "\n")))
}
func TestNewFileLoggerWithWf(t *testing.T) {
logFile := "/tmp/logger-test/test.log"
errLogFile := "/tmp/logger-test/test.log.wf"
dir := path.Dir(logFile)
err := os.MkdirAll(dir, 0775)
assert.NoError(t, err)
defer os.RemoveAll(dir)
l, err := NewFileLoggerWithErr(logFile, errLogFile, "debug")
assert.NoError(t, err)
l.Debug("file - debug")
l.Info("file - info")
l.Warn("file - warn")
l.Error("file - error")
log, err := ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 5, len(strings.Split(string(log), "\n")))
errLog, err := ioutil.ReadFile(errLogFile)
assert.NoError(t, err)
assert.Equal(t, 3, len(strings.Split(string(errLog), "\n")))
// Move log file.
movedLogFile := fmt.Sprintf(`%s.move`, logFile)
os.Rename(logFile, movedLogFile)
movedErrLogFile := fmt.Sprintf(`%s.move`, errLogFile)
os.Rename(errLogFile, movedErrLogFile)
l.Error("file - error")
log, err = ioutil.ReadFile(movedLogFile)
assert.NoError(t, err)
assert.Equal(t, 6, len(strings.Split(string(log), "\n")))
errLog, err = ioutil.ReadFile(movedErrLogFile)
assert.NoError(t, err)
assert.Equal(t, 4, len(strings.Split(string(errLog), "\n")))
// Reopen.
syscall.Kill(syscall.Getpid(), syscall.SIGHUP)
time.Sleep(10 * time.Millisecond)
l.Warn("file - warn")
l.Error("file - error")
log, err = ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 3, len(strings.Split(string(log), "\n")))
errLog, err = ioutil.ReadFile(errLogFile)
assert.NoError(t, err)
assert.Equal(t, 3, len(strings.Split(string(errLog), "\n")))
}
func TestBufferedFileLogger(t *testing.T) {
logFile := "/tmp/logger-test/test.log"
dir := path.Dir(logFile)
err := os.MkdirAll(dir, 0775)
assert.NoError(t, err)
defer os.RemoveAll(dir)
l, err := NewBufferedFileLogger(logFile, "debug")
assert.NoError(t, err)
l.Debug("file - debug")
l.Info("file - info")
l.Warn("file - warn")
l.Error("file - error")
log, err := ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 1, len(strings.Split(string(log), "\n")))
// Wait timeout.
//time.Sleep(10*time.Second + 10*time.Millisecond)
//
//log, err = ioutil.ReadFile(logFile)
//assert.NoError(t, err)
//assert.Equal(t, 5, len(strings.Split(string(log), "\n")))
}
func TestBufferedFileLoggerWithErr(t *testing.T) {
logFile := "/tmp/logger-test/test.log"
errLogFile := "/tmp/logger-test/test.log.wf"
dir := path.Dir(logFile)
err := os.MkdirAll(dir, 0775)
assert.NoError(t, err)
defer os.RemoveAll(dir)
errL, err := NewBufferedFileLoggerWithErr(logFile, errLogFile, "debug")
assert.NoError(t, err)
errL.Debug("file - debug")
errL.Info("file - info")
errL.Warn("file - warn")
errL.Error("file - error")
log, err := ioutil.ReadFile(logFile)
assert.NoError(t, err)
assert.Equal(t, 1, len(strings.Split(string(log), "\n")))
errLog, err := ioutil.ReadFile(errLogFile)
assert.NoError(t, err)
assert.Equal(t, 1, len(strings.Split(string(errLog), "\n")))
// Wait timeout.
//time.Sleep(10*time.Second + 10*time.Millisecond)
//
//log, err = ioutil.ReadFile(logFile)
//assert.NoError(t, err)
//assert.Equal(t, 5, len(strings.Split(string(log), "\n")))
//
//errLog, err = ioutil.ReadFile(errLogFile)
//assert.NoError(t, err)
//assert.Equal(t, 3, len(strings.Split(string(errLog), "\n")))
}
func TestTerminalLogger(t *testing.T) {
l, err := NewTerminalLogger("debug")
assert.NoError(t, err)
l.Debug("terminal - debug")
l.Info("terminal - info")
l.Warn("terminal - warn")
l.Error("terminal - error")
l.Debugf("terminal - debug - %d", time.Now().Unix())
l.Infof("terminal - info - %d", time.Now().Unix())
l.Warnf("terminal - warn - %d", time.Now().Unix())
l.Errorf("terminal - error - %d", time.Now().Unix())
}
func TestTerminalLoggerWithErr(t *testing.T) {
errL, err := NewTerminalLoggerWithErr("debug")
assert.NoError(t, err)
errL.Debug("terminal - debug - err")
errL.Info("terminal - info - err")
errL.Warn("terminal - warn - err")
errL.Error("terminal - error - err")
errL.Debugf("terminal - debug - err - %d", time.Now().Unix())
errL.Infof("terminal - info - err - %d", time.Now().Unix())
errL.Warnf("terminal - warn - err - %d", time.Now().Unix())
errL.Errorf("terminal - error - err - %d", time.Now().Unix())
}

49
vendor/github.com/pengsrc/go-shared/pid/pidfile.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
// Package pid provides structure and helper functions to create and remove
// PID file. A PID file is usually a file used to store the process ID of a
// running process.
package pid
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
// File is a file used to store the process ID of a running process.
type File struct {
path string
}
func checkPIDFileAlreadyExists(path string) error {
if pidByte, err := ioutil.ReadFile(path); err == nil {
pidString := strings.TrimSpace(string(pidByte))
if pid, err := strconv.Atoi(pidString); err == nil {
if processExists(pid) {
return fmt.Errorf("pid file found, ensure server is not running or delete %s", path)
}
}
}
return nil
}
// New creates a PID file using the specified path.
func New(path string) (*File, error) {
if err := checkPIDFileAlreadyExists(path); err != nil {
return nil, err
}
if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil {
return nil, err
}
return &File{path: path}, nil
}
// Remove removes the File.
func (file File) Remove() error {
if err := os.Remove(file.path); err != nil {
return err
}
return nil
}
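Not part of the vendored file: a minimal sketch of the intended PID-file lifecycle, with an illustrative path — create the file at startup, remove it on shutdown.
package main

import (
	"log"

	"github.com/pengsrc/go-shared/pid"
)

func main() {
	// Fails if a PID file already exists and the recorded process is still running.
	f, err := pid.New("/tmp/myservice.pid")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Remove()
	// ... run the service ...
}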

View File

@ -0,0 +1,14 @@
// +build darwin
package pid
import (
"syscall"
)
func processExists(pid int) bool {
// OS X does not have a proc filesystem.
// Use kill -0 pid to check whether the process exists.
err := syscall.Kill(pid, 0)
return err == nil
}

View File

@ -0,0 +1,38 @@
package pid
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
func TestNewAndRemove(t *testing.T) {
dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile")
if err != nil {
t.Fatal("Could not create test directory")
}
path := filepath.Join(dir, "testfile")
file, err := New(path)
if err != nil {
t.Fatal("Could not create test file", err)
}
_, err = New(path)
if err == nil {
t.Fatal("Test file creation not blocked")
}
if err := file.Remove(); err != nil {
t.Fatal("Could not delete created test file")
}
}
func TestRemoveInvalidPath(t *testing.T) {
file := File{path: filepath.Join("foo", "bar")}
if err := file.Remove(); err == nil {
t.Fatal("Non-existing file doesn't give an error on delete")
}
}

View File

@ -0,0 +1,16 @@
// +build !windows,!darwin
package pid
import (
"os"
"path/filepath"
"strconv"
)
func processExists(pid int) bool {
if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil {
return true
}
return false
}

View File

@ -0,0 +1,23 @@
package pid
import "syscall"
const (
processQueryLimitedInformation = 0x1000
stillActive = 259
)
func processExists(pid int) bool {
h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
if err != nil {
return false
}
var c uint32
err = syscall.GetExitCodeProcess(h, &c)
syscall.Close(h)
if err != nil {
return c == stillActive
}
return true
}

176
vendor/github.com/pengsrc/go-shared/reopen/reopen.go generated vendored Normal file
View File

@ -0,0 +1,176 @@
package reopen
import (
"bufio"
"io"
"os"
"sync"
"time"
)
// Reopener interface defines something that can be reopened.
type Reopener interface {
Reopen() error
}
// Writer is a writer that also can be reopened.
type Writer interface {
Reopener
io.Writer
}
// WriteCloser is an io.WriteCloser that can also be reopened.
type WriteCloser interface {
Reopener
io.WriteCloser
}
// FileWriter is a file writer that can also be reopened.
type FileWriter struct {
// Ensures close/reopen/write are not called at the same time, protects f
mu sync.Mutex
f *os.File
mode os.FileMode
name string
}
// Close calls the underlying File.Close().
func (f *FileWriter) Close() error {
f.mu.Lock()
err := f.f.Close()
f.mu.Unlock()
return err
}
// Reopen the file.
func (f *FileWriter) Reopen() error {
f.mu.Lock()
err := f.reopen()
f.mu.Unlock()
return err
}
// Write implements the standard io.Writer interface.
func (f *FileWriter) Write(p []byte) (int, error) {
f.mu.Lock()
n, err := f.f.Write(p)
f.mu.Unlock()
return n, err
}
// reopen reopens the file without taking the mutex; callers must hold it.
func (f *FileWriter) reopen() error {
if f.f != nil {
f.f.Close()
f.f = nil
}
ff, err := os.OpenFile(f.name, os.O_WRONLY|os.O_APPEND|os.O_CREATE, f.mode)
if err != nil {
f.f = nil
return err
}
f.f = ff
return nil
}
// NewFileWriter opens a file for appending and writing and can be reopened.
// It is a ReopenWriteCloser...
func NewFileWriter(name string) (*FileWriter, error) {
// Standard default mode
return NewFileWriterMode(name, 0644)
}
// NewFileWriterMode opens a Reopener file with a specific permission.
func NewFileWriterMode(name string, mode os.FileMode) (*FileWriter, error) {
writer := FileWriter{
f: nil,
name: name,
mode: mode,
}
err := writer.reopen()
if err != nil {
return nil, err
}
return &writer, nil
}
// BufferedFileWriter is a buffered writer that can be reopened.
type BufferedFileWriter struct {
mu sync.Mutex
OrigWriter *FileWriter
BufWriter *bufio.Writer
}
// Reopen implements Reopener.
func (bw *BufferedFileWriter) Reopen() error {
bw.mu.Lock()
bw.BufWriter.Flush()
// Use non-mutex version since we are using this one.
err := bw.OrigWriter.reopen()
bw.BufWriter.Reset(io.Writer(bw.OrigWriter))
bw.mu.Unlock()
return err
}
// Close flushes the internal buffer and closes the destination file.
func (bw *BufferedFileWriter) Close() error {
bw.mu.Lock()
bw.BufWriter.Flush()
bw.OrigWriter.f.Close()
bw.mu.Unlock()
return nil
}
// Write implements io.Writer (and reopen.Writer).
func (bw *BufferedFileWriter) Write(p []byte) (int, error) {
bw.mu.Lock()
n, err := bw.BufWriter.Write(p)
// Special Case... if the used space in the buffer is LESS than
// the input, then we did a flush in the middle of the line
// and the full log line was not sent on its way.
if bw.BufWriter.Buffered() < len(p) {
bw.BufWriter.Flush()
}
bw.mu.Unlock()
return n, err
}
// Flush flushes the buffer.
func (bw *BufferedFileWriter) Flush() {
bw.mu.Lock()
bw.BufWriter.Flush()
bw.OrigWriter.f.Sync()
bw.mu.Unlock()
}
// flushDaemon periodically flushes the log file buffers.
func (bw *BufferedFileWriter) flushDaemon(interval time.Duration) {
for range time.NewTicker(interval).C {
bw.Flush()
}
}
const bufferSize = 256 * 1024
const flushInterval = 30 * time.Second
// NewBufferedFileWriter opens a buffered file that is periodically flushed.
func NewBufferedFileWriter(w *FileWriter) *BufferedFileWriter {
return NewBufferedFileWriterSize(w, bufferSize, flushInterval)
}
// NewBufferedFileWriterSize opens a buffered file with the given size that is periodically
// flushed on the given interval.
func NewBufferedFileWriterSize(w *FileWriter, size int, flush time.Duration) *BufferedFileWriter {
bw := BufferedFileWriter{
OrigWriter: w,
BufWriter: bufio.NewWriterSize(w, size),
}
go bw.flushDaemon(flush)
return &bw
}
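For orientation, here is a minimal usage sketch of this reopen API, assuming a SIGHUP-driven log-rotation setup (the import path, file name and signal wiring are illustrative assumptions, not part of this commit):
``` go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	// Import path assumed for illustration; adjust to wherever this
	// reopen package is vendored in your tree.
	"github.com/pengsrc/go-shared/reopen"
)

func main() {
	// Open (or create) a log file that can later be reopened in place.
	f, err := reopen.NewFileWriter("/tmp/myapp.log")
	if err != nil {
		log.Fatal(err)
	}
	log.SetOutput(f)

	// Reopen the file whenever SIGHUP arrives, e.g. after logrotate has
	// moved the old file aside.
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		for range hup {
			if err := f.Reopen(); err != nil {
				log.Printf("reopen failed: %v", err)
			}
		}
	}()

	log.Println("logging to a reopenable file")
	// ... the application keeps running and logging here.
}
```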

View File

@ -0,0 +1,124 @@
package reopen
import (
"io/ioutil"
"os"
"testing"
)
// TestReopenAppend tests that we always append to an existing file
func TestReopenAppend(t *testing.T) {
filename := "/tmp/reopen_test_foo"
defer os.Remove(filename)
// Create a sample file using normal means.
orig, err := os.Create(filename)
if err != nil {
t.Fatalf("Unable to create initial file %s: %s", filename, err)
}
_, err = orig.Write([]byte("line0\n"))
if err != nil {
t.Fatalf("Unable to write initial line %s: %s", filename, err)
}
err = orig.Close()
if err != nil {
t.Fatalf("Unable to close initial file: %s", err)
}
// Test that making a new File appends.
f, err := NewFileWriter(filename)
if err != nil {
t.Fatalf("Unable to create %s", filename)
}
_, err = f.Write([]byte("line1\n"))
if err != nil {
t.Errorf("Got write error1: %s", err)
}
// Test that reopen always appends.
err = f.Reopen()
if err != nil {
t.Errorf("Got reopen error %s: %s", filename, err)
}
_, err = f.Write([]byte("line2\n"))
if err != nil {
t.Errorf("Got write error2 on %s: %s", filename, err)
}
// Close file.
err = f.Close()
if err != nil {
t.Errorf("Got closing error for %s: %s", filename, err)
}
// Read file, make sure it contains line0, line1, line2.
out, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatalf("Unable read in final file %s: %s", filename, err)
}
outStr := string(out)
if outStr != "line0\nline1\nline2\n" {
t.Errorf("Result was %s", outStr)
}
}
// TestChangeINODE tests that reopen works when the inode is swapped out.
func TestChangeINODE(t *testing.T) {
filename := "/tmp/reopen_test_foo"
moveFilename := "/tmp/reopen_test_foo.orig"
defer os.Remove(filename)
defer os.Remove(moveFilename)
// Step 1 -- Create a sample file using normal means.
orig, err := os.Create(filename)
if err != nil {
t.Fatalf("Unable to create initial file %s: %s", filename, err)
}
err = orig.Close()
if err != nil {
t.Fatalf("Unable to close initial file: %s", err)
}
// Step 2 -- Test that making a new File appends.
f, err := NewFileWriter(filename)
if err != nil {
t.Fatalf("Unable to create %s", filename)
}
_, err = f.Write([]byte("line1\n"))
if err != nil {
t.Errorf("Got write error1: %s", err)
}
// Step 3 -- Now move file.
err = os.Rename(filename, moveFilename)
if err != nil {
t.Errorf("Renaming error: %s", err)
}
f.Write([]byte("after1\n"))
// Step 4 -- Test that reopen always appends.
err = f.Reopen()
if err != nil {
t.Errorf("Got reopen error %s: %s", filename, err)
}
_, err = f.Write([]byte("line2\n"))
if err != nil {
t.Errorf("Got write error2 on %s: %s", filename, err)
}
// Close file.
err = f.Close()
if err != nil {
t.Errorf("Got closing error for %s: %s", filename, err)
}
// Read the new file; it should contain only line2 (line1 and after1 went to the moved file).
out, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatalf("Unable read in final file %s: %s", filename, err)
}
outStr := string(out)
if outStr != "line2\n" {
t.Errorf("Result was %s", outStr)
}
}

151
vendor/github.com/pengsrc/go-shared/rest/rest.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package rest
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/Jeffail/gabs"
)
// Method contains the supported HTTP verbs.
type Method string
// Supported HTTP verbs.
const (
Get Method = "GET"
Post Method = "POST"
Put Method = "PUT"
Patch Method = "PATCH"
Delete Method = "DELETE"
)
// Request holds the request to an API Call.
type Request struct {
Method Method
BaseURL string // e.g. https://api.service.com
Headers map[string]string
QueryParams map[string]string
Body []byte
}
// Response holds the response from an API call.
type Response struct {
StatusCode int // e.g. 200
Headers http.Header // e.g. map[X-Rate-Limit:[600]]
Body string // e.g. {"result": "success"}
JSON *gabs.Container
}
// ParseJSON parses the response body to JSON container.
func (r *Response) ParseJSON() error {
if strings.Contains(r.Headers.Get("Content-Type"), "application/json") {
json, err := gabs.ParseJSON([]byte(r.Body))
if err != nil {
return err
}
r.JSON = json
return nil
}
return errors.New("response body is not JSON")
}
// DefaultClient is used if no custom HTTP client is defined
var DefaultClient = &Client{HTTPClient: http.DefaultClient}
// Client allows modification of client headers, redirect policy
// and other settings
// See https://golang.org/pkg/net/http
type Client struct {
HTTPClient *http.Client
}
// The following functions enable the ability to define a
// custom HTTP Client
// MakeRequest makes the API call.
func (c *Client) MakeRequest(req *http.Request) (*http.Response, error) {
return c.HTTPClient.Do(req)
}
// API is the main interface to the API.
func (c *Client) API(r *Request) (*Response, error) {
// Build the HTTP request object.
req, err := BuildRequestObject(r)
if err != nil {
return nil, err
}
// Build the HTTP client and make the request.
res, err := c.MakeRequest(req)
if err != nil {
return nil, err
}
// Build Response object.
response, err := BuildResponse(res)
if err != nil {
return nil, err
}
return response, nil
}
// AddQueryParameters adds query parameters to the URL.
func AddQueryParameters(baseURL string, queryParams map[string]string) string {
baseURL += "?"
params := url.Values{}
for key, value := range queryParams {
params.Add(key, value)
}
return baseURL + params.Encode()
}
// BuildRequestObject creates the HTTP request object.
func BuildRequestObject(r *Request) (*http.Request, error) {
// Add any query parameters to the URL.
if len(r.QueryParams) != 0 {
r.BaseURL = AddQueryParameters(r.BaseURL, r.QueryParams)
}
req, err := http.NewRequest(string(r.Method), r.BaseURL, bytes.NewBuffer(r.Body))
if err != nil {
return nil, err
}
for key, value := range r.Headers {
req.Header.Set(key, value)
}
_, exists := req.Header["Content-Type"]
if len(r.Body) > 0 && !exists {
req.Header.Set("Content-Type", "application/json")
}
return req, err
}
// BuildResponse builds the response struct.
func BuildResponse(r *http.Response) (*Response, error) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
defer r.Body.Close()
response := Response{
StatusCode: r.StatusCode,
Body: string(body),
Headers: r.Header,
}
return &response, nil
}
// MakeRequest makes the API call.
func MakeRequest(r *http.Request) (*http.Response, error) {
return DefaultClient.HTTPClient.Do(r)
}
// API is the main interface to the API.
func API(request *Request) (*Response, error) {
return DefaultClient.API(request)
}

138
vendor/github.com/pengsrc/go-shared/rest/rest_test.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
package rest
import (
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestBuildURL(t *testing.T) {
testURL := AddQueryParameters(
"http://api.test.com",
map[string]string{
"test": "1",
"test2": "2",
},
)
assert.Equal(t, "http://api.test.com?test=1&test2=2", testURL)
}
func TestBuildRequest(t *testing.T) {
request := Request{
Method: Get,
BaseURL: "http://api.test.com",
Headers: map[string]string{
"Content-Type": "application/json",
"Authorization": "Bearer APK_KEY",
},
QueryParams: map[string]string{
"test": "1",
"test2": "2",
},
}
req, err := BuildRequestObject(&request)
assert.NoError(t, err)
assert.NotNil(t, req)
}
func TestBuildResponse(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/not+json")
fmt.Fprintln(w, "{\"message\": \"success\"}")
}))
defer fakeServer.Close()
request := Request{
Method: Get,
BaseURL: fakeServer.URL,
}
req, err := BuildRequestObject(&request)
assert.NoError(t, err)
res, err := MakeRequest(req)
assert.NoError(t, err)
response, err := BuildResponse(res)
assert.NoError(t, err)
err = response.ParseJSON()
assert.Error(t, err)
assert.Equal(t, 200, response.StatusCode)
assert.NotEqual(t, 0, len(response.Body))
assert.NotEqual(t, 0, len(response.Headers))
assert.Nil(t, response.JSON)
}
func TestRest(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
fmt.Fprintln(w, "{\"message\": \"success\"}")
}))
defer fakeServer.Close()
request := Request{
Method: Get,
BaseURL: fakeServer.URL + "/test_endpoint",
Headers: map[string]string{
"Content-Type": "application/json",
"Authorization": "Bearer APK_KEY",
},
QueryParams: map[string]string{
"test": "1",
"test2": "2",
},
}
response, err := API(&request)
assert.NoError(t, err)
err = response.ParseJSON()
assert.NoError(t, err)
assert.Equal(t, 200, response.StatusCode)
assert.NotEqual(t, 0, len(response.Body))
assert.NotEqual(t, 0, len(response.Headers))
assert.Equal(t, "success", response.JSON.Path("message").Data().(string))
}
func TestDefaultContentType(t *testing.T) {
request := Request{
Method: Get,
BaseURL: "http://localhost",
Body: []byte(`{"hello": "world"}`),
}
req, err := BuildRequestObject(&request)
assert.NoError(t, err)
assert.Equal(t, "application/json", req.Header.Get("Content-Type"))
}
func TestCustomContentType(t *testing.T) {
request := Request{
Method: Get,
BaseURL: "http://localhost",
Headers: map[string]string{"Content-Type": "custom"},
Body: []byte("Hello World"),
}
res, err := BuildRequestObject(&request)
assert.NoError(t, err)
assert.Equal(t, "custom", res.Header.Get("Content-Type"))
}
func TestCustomHTTPClient(t *testing.T) {
fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(time.Millisecond * 20)
fmt.Fprintln(w, "{\"message\": \"success\"}")
}))
defer fakeServer.Close()
request := Request{
Method: Get,
BaseURL: fakeServer.URL + "/test_endpoint",
}
customClient := &Client{&http.Client{Timeout: time.Millisecond * 10}}
_, err := customClient.API(&request)
assert.True(t, strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers"))
}

32
vendor/github.com/pengsrc/go-shared/yaml/yaml.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package yaml
import (
"gopkg.in/yaml.v2"
)
// Encode encodes the given interface to a YAML byte slice.
func Encode(source interface{}) ([]byte, error) {
bytesResult, err := yaml.Marshal(source)
if err != nil {
return []byte{}, err
}
return bytesResult, nil
}
// Decode decodes the given YAML byte slice into the corresponding struct.
func Decode(content []byte, destinations ...interface{}) (interface{}, error) {
var destination interface{}
var err error
if len(destinations) == 1 {
destination = destinations[0]
err = yaml.Unmarshal(content, destination)
} else {
err = yaml.Unmarshal(content, &destination)
}
if err != nil {
return nil, err
}
return destination, err
}

72
vendor/github.com/pengsrc/go-shared/yaml/yaml_test.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package yaml
import (
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
func TestYAMLDecodeUnknown(t *testing.T) {
yamlString := `
key1: "This is a string." # Single Line Comment
key2: 10.50
key3:
- null
- nestedKey1: Anothor string
`
anyData, err := Decode([]byte(yamlString))
assert.NoError(t, err)
data := anyData.(map[interface{}]interface{})
assert.Equal(t, 10.50, data["key2"])
}
func TestYAMLDecodeKnown(t *testing.T) {
type SampleYAML struct {
Name string `yaml:"name"`
Description string `yaml:"description"`
}
sampleYAMLString := `name: "NAME"`
sample := SampleYAML{Name: "NaMe", Description: "DeScRiPtIoN"}
anyDataPointer, err := Decode([]byte(sampleYAMLString), &sample)
assert.NoError(t, err)
data := anyDataPointer.(*SampleYAML)
assert.Equal(t, "NAME", sample.Name)
assert.Equal(t, "DeScRiPtIoN", sample.Description)
assert.Equal(t, "NAME", (*data).Name)
assert.Equal(t, "DeScRiPtIoN", (*data).Description)
_, err = Decode([]byte(`- - -`), &YAMLMustError{})
assert.Error(t, err)
}
func TestYAMLDecodeEmpty(t *testing.T) {
yamlString := ""
anyData, err := Decode([]byte(yamlString))
assert.NoError(t, err)
assert.Nil(t, anyData)
}
func TestYAMLEncode(t *testing.T) {
type SampleYAML struct {
Name string `yaml:"name"`
Description string `yaml:"description"`
}
sample := SampleYAML{Name: "NaMe", Description: "DeScRiPtIoN"}
yamlBytes, err := Encode(sample)
assert.NoError(t, err)
assert.Equal(t, "name: NaMe\ndescription: DeScRiPtIoN\n", string(yamlBytes))
_, err = Encode(&YAMLMustError{})
assert.Error(t, err)
}
type YAMLMustError struct{}
func (*YAMLMustError) MarshalYAML() (interface{}, error) {
return nil, errors.New("marshal error")
}

30
vendor/github.com/yunify/qingstor-sdk-go/.gitignore generated vendored Normal file
View File

@ -0,0 +1,30 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
out
gen
vendor
coverage
release

6
vendor/github.com/yunify/qingstor-sdk-go/.gitmodules generated vendored Normal file
View File

@ -0,0 +1,6 @@
[submodule "specs/qingstor"]
path = specs/qingstor
url = https://github.com/yunify/qingstor-api-specs.git
[submodule "test/features"]
path = test/features
url = https://github.com/yunify/qingstor-sdk-test-scenarios.git

51
vendor/github.com/yunify/qingstor-sdk-go/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,51 @@
sudo: required
services:
- docker
language: go
go:
- 1.7
env:
matrix:
- GO_VERSION=1.8
- GO_VERSION=1.7
- GO_VERSION=1.6
- GO_VERSION=1.5
cache:
directories:
- ${HOME}/source
before_install:
- pushd ${HOME}/source
- if [[ ! -d "./make-4.0" ]]; then
wget http://ftp.gnu.org/gnu/make/make-4.0.tar.gz &&
tar -vxzf make-4.0.tar.gz &&
pushd make-4.0 && ./configure && make && popd;
fi
- pushd make-4.0 && sudo make install && popd
- if [[ ! -d "./glide-v0.12.3" ]]; then
wget https://github.com/Masterminds/glide/releases/download/v0.12.3/glide-v0.12.3-linux-amd64.tar.gz &&
tar -vxzf glide-v0.12.3-linux-amd64.tar.gz &&
mv linux-amd64 glide-v0.12.3;
fi
- pushd glide-v0.12.3 && sudo cp glide /usr/local/bin && popd
- popd
- /usr/local/bin/make --version
- /usr/local/bin/glide --version
install:
- go get -u github.com/yunify/snips
- go get -u github.com/golang/lint/golint;
- glide install
before_script:
- /usr/local/bin/make update
- /usr/local/bin/make generate
script:
- /usr/local/bin/make check
- /usr/local/bin/make release
- /usr/local/bin/make test-runtime-go-${GO_VERSION}

4
vendor/github.com/yunify/qingstor-sdk-go/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,4 @@
Patches have been contributed by (ordered by the time of the first merged patch):
Jingwen Peng <pengsrc@yunify.com>
Osier Yang <osier@yunify.com>

107
vendor/github.com/yunify/qingstor-sdk-go/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,107 @@
# Change Log
All notable changes to QingStor SDK for Go will be documented in this file.
## [v2.2.5] - 2017-05-22
### Fixed
- Fix error in request URL query.
- Fix error in request header value.
## [v2.2.4] - 2017-03-28
### Fixed
- Fix type of Content-Type header.
- Add Content-Length to GetObjectOutput.
- Fix status code of DELETE CORS API.
- Fix type of object size for GET Bucket API.
### BREAKING CHANGES
- The type of content length and object size has been changed from `*int` to `*int64`.
## [v2.2.3] - 2017-03-10
### Added
- Allow user to append additional info to User-Agent
## [v2.2.2] - 2017-03-08
### Fixed
- Resource is not mandatory in bucket policy statement
## [v2.2.1] - 2017-03-05
### Changed
- Add "Encrypted" field to "KeyType" struct
## [v2.2.0] - 2017-02-28
### Added
- Add ListMultipartUploads API.
### Fixed
- Fix request builder & signer.
## [v2.1.2] - 2017-01-16
### Fixed
- Fix request signer.
## [v2.1.1] - 2017-01-05
### Changed
- Fix logger output format, don't parse special characters.
- Rename package "errs" to "errors".
### Added
- Add type converters.
### BREAKING CHANGES
- Change value type in input and output to pointer.
## [v2.1.0] - 2016-12-23
### Changed
- Fix signer bug.
- Add more parameters to sign.
### Added
- Add request parameters for GET Object.
- Add IP address conditions for bucket policy.
## [v2.0.1] - 2016-12-15
### Changed
- Improve the implementation of deleting multiple objects.
## [v2.0.0] - 2016-12-14
### Added
- QingStor SDK for the Go programming language.
[v2.2.5]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.4...v2.2.5
[v2.2.4]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.3...v2.2.4
[v2.2.3]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.2...v2.2.3
[v2.2.2]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.1...v2.2.2
[v2.2.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.2.0...v2.2.1
[v2.2.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.2...v2.2.0
[v2.1.2]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.1...v2.1.2
[v2.1.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.1.0...v2.1.1
[v2.1.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.1...v2.1.0
[v2.0.1]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.0...v2.0.1
[v2.0.0]: https://github.com/yunify/qingstor-sdk-go/compare/v2.0.0...v2.0.0

201
vendor/github.com/yunify/qingstor-sdk-go/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2012 Yunify Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
vendor/github.com/yunify/qingstor-sdk-go/MAINTAINERS generated vendored Normal file
View File

@ -0,0 +1 @@
Jingwen Peng <pengsrc@yunify.com>

207
vendor/github.com/yunify/qingstor-sdk-go/Makefile generated vendored Normal file
View File

@ -0,0 +1,207 @@
SHELL := /bin/bash
PREFIX=qingstor-sdk-go
VERSION=$(shell cat version.go | grep "Version\ =" | sed -e s/^.*\ //g | sed -e s/\"//g)
DIRS_TO_CHECK=$(shell ls -d */ | grep -vE "vendor|test")
PKGS_TO_CHECK=$(shell go list ./... | grep -v "/vendor/")
PKGS_TO_RELEASE=$(shell go list ./... | grep -vE "/vendor/|/test")
FILES_TO_RELEASE=$(shell find . -name "*.go" | grep -vE "/vendor/|/test|.*_test.go")
FILES_TO_RELEASE_WITH_VENDOR=$(shell find . -name "*.go" | grep -vE "/test|.*_test.go")
.PHONY: help
help:
@echo "Please use \`make <target>\` where <target> is one of"
@echo " all to check, build, test and release this SDK"
@echo " check to vet and lint the SDK"
@echo " update to update git submodules"
@echo " generate to generate service code"
@echo " build to build the SDK"
@echo " test to run test"
@echo " test-coverage to run test with coverage"
@echo " test-race to run test with race"
@echo " test-runtime to run test in Go 1.8/1.7/1.6/1.5 in docker"
@echo " integration-test to run integration test"
@echo " release to build and release current version"
@echo " release-source to pack the source code"
@echo " clean to clean the coverage files"
.PHONY: all
all: check build unit release
.PHONY: check
check: vet lint
.PHONY: vet
vet:
@echo "go tool vet, skipping vendor packages"
@go tool vet -all ${DIRS_TO_CHECK}
@echo "ok"
.PHONY: lint
lint:
@echo "golint, skipping vendor packages"
@lint=$$(for pkg in ${PKGS_TO_CHECK}; do golint $${pkg}; done); \
lint=$$(echo "$${lint}"); \
if [[ -n $${lint} ]]; then echo "$${lint}"; exit 1; fi
@echo "ok"
.PHONY: update
update:
git submodule update --remote
@echo "ok"
.PHONY: generate
generate:
@if [[ ! -f "$$(which snips)" ]]; then \
echo "ERROR: Command \"snips\" not found."; \
fi
snips \
--service=qingstor --service-api-version=latest \
--spec="./specs" --template="./template" --output="./service"
gofmt -w .
@echo "ok"
.PHONY: build
build:
@echo "build the SDK"
GOOS=linux GOARCH=amd64 go build ${PKGS_TO_RELEASE}
GOOS=darwin GOARCH=amd64 go build ${PKGS_TO_RELEASE}
GOOS=windows GOARCH=amd64 go build ${PKGS_TO_RELEASE}
@echo "ok"
.PHONY: test
test:
@echo "run test"
go test -v ${PKGS_TO_RELEASE}
@echo "ok"
.PHONY: test-coverage
test-coverage:
@echo "run test with coverage"
for pkg in ${PKGS_TO_RELEASE}; do \
output="coverage$${pkg#github.com/yunify/qingstor-sdk-go}"; \
mkdir -p $${output}; \
go test -v -cover -coverprofile="$${output}/profile.out" $${pkg}; \
if [[ -e "$${output}/profile.out" ]]; then \
go tool cover -html="$${output}/profile.out" -o "$${output}/profile.html"; \
fi; \
done
@echo "ok"
.PHONY: test-race
test-race:
@echo "run test with race"
go test -v -race -cpu=1,2,4 ${PKGS_TO_RELEASE}
@echo "ok"
.PHONY: test-runtime
test-runtime: test-runtime-go-1.8 test-runtime-go-1.7 test-runtime-go-1.6 test-runtime-go-1.5
export define DOCKERFILE_GO_1_8
FROM golang:1.8
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.8
test-runtime-go-1.8:
@echo "run test in go 1.8"
echo "$${DOCKERFILE_GO_1_8}" > "dockerfile_go_1.8"
docker build -f "./dockerfile_go_1.8" -t "${PREFIX}:go-1.8" .
rm -f "./dockerfile_go_1.8"
docker run --name "${PREFIX}-go-1.8-unit" -t "${PREFIX}:go-1.8"
docker rm "${PREFIX}-go-1.8-unit"
docker rmi "${PREFIX}:go-1.8"
@echo "ok"
export define DOCKERFILE_GO_1_7
FROM golang:1.7
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.7
test-runtime-go-1.7:
@echo "run test in go 1.7"
echo "$${DOCKERFILE_GO_1_7}" > "dockerfile_go_1.7"
docker build -f "./dockerfile_go_1.7" -t "${PREFIX}:go-1.7" .
rm -f "./dockerfile_go_1.7"
docker run --name "${PREFIX}-go-1.7-unit" -t "${PREFIX}:go-1.7"
docker rm "${PREFIX}-go-1.7-unit"
docker rmi "${PREFIX}:go-1.7"
@echo "ok"
export define DOCKERFILE_GO_1_6
FROM golang:1.6
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.6
test-runtime-go-1.6:
@echo "run test in go 1.6"
echo "$${DOCKERFILE_GO_1_6}" > "dockerfile_go_1.6"
docker build -f "./dockerfile_go_1.6" -t "${PREFIX}:go-1.6" .
rm -f "./dockerfile_go_1.6"
docker run --name "${PREFIX}-go-1.6-unit" -t "${PREFIX}:go-1.6"
docker rm "${PREFIX}-go-1.6-unit"
docker rmi "${PREFIX}:go-1.6"
@echo "ok"
export define DOCKERFILE_GO_1_5
FROM golang:1.5
ENV GO15VENDOREXPERIMENT="1"
ADD . /go/src/github.com/yunify/qingstor-sdk-go
WORKDIR /go/src/github.com/yunify/qingstor-sdk-go
CMD ["make", "build", "test", "test-coverage"]
endef
.PHONY: test-runtime-go-1.5
test-runtime-go-1.5:
@echo "run test in go 1.5"
echo "$${DOCKERFILE_GO_1_5}" > "dockerfile_go_1.5"
docker build -f "dockerfile_go_1.5" -t "${PREFIX}:go-1.5" .
rm -f "dockerfile_go_1.5"
docker run --name "${PREFIX}-go-1.5-unit" -t "${PREFIX}:go-1.5"
docker rm "${PREFIX}-go-1.5-unit"
docker rmi "${PREFIX}:go-1.5"
@echo "ok"
.PHONY: integration-test
integration-test:
@echo "run integration test"
pushd "./test"; go run *.go; popd
@echo "ok"
.PHONY: release
release: release-source release-source-with-vendor
.PHONY: release-source
release-source:
@echo "pack the source code"
mkdir -p "release"
zip -FS "release/${PREFIX}-source-v${VERSION}.zip" ${FILES_TO_RELEASE}
@echo "ok"
.PHONY: release-source-with-vendor
release-source-with-vendor:
@echo "pack the source code"
mkdir -p "release"
zip -FS "release/${PREFIX}-source-with-vendor-v${VERSION}.zip" ${FILES_TO_RELEASE_WITH_VENDOR}
@echo "ok"
.PHONY: clean
clean:
rm -rf $${PWD}/coverage
@echo "ok"

84
vendor/github.com/yunify/qingstor-sdk-go/README.md generated vendored Normal file
View File

@ -0,0 +1,84 @@
# QingStor SDK for Go
[![Build Status](https://travis-ci.org/yunify/qingstor-sdk-go.svg?branch=master)](https://travis-ci.org/yunify/qingstor-sdk-go)
[![Go Report Card](https://goreportcard.com/badge/github.com/yunify/qingstor-sdk-go)](https://goreportcard.com/report/github.com/yunify/qingstor-sdk-go)
[![API Reference](http://img.shields.io/badge/api-reference-green.svg)](http://docs.qingcloud.com/qingstor/)
[![License](http://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/yunify/qingstor-sdk-go/blob/master/LICENSE)
The official QingStor SDK for the Go programming language.
## Getting Started
### Installation
Refer to the [Installation Guide](docs/installation.md), and have this SDK installed.
### Preparation
Before you start, please go to [QingCloud Console](https://console.qingcloud.com/access_keys/) to create a pair of QingCloud API access keys.
___API AccessKey Example:___
``` yaml
access_key_id: 'ACCESS_KEY_ID_EXAMPLE'
secret_access_key: 'SECRET_ACCESS_KEY_EXAMPLE'
```
### Usage
Now you are ready to code. You can read the detailed guides listed below for a full understanding, or just start from the quick start code example.
Checkout our [releases](https://github.com/yunify/qingstor-sdk-go/releases) and [change log](https://github.com/yunify/qingstor-sdk-go/blob/master/CHANGELOG.md) for information about the latest features, bug fixes and new ideas.
- [Configuration Guide](docs/configuration.md)
- [QingStor Service Usage Guide](docs/qingstor_service_usage.md)
___Quick Start Code Example:___
``` go
package main
import (
"fmt"
"github.com/yunify/qingstor-sdk-go/config"
qs "github.com/yunify/qingstor-sdk-go/service"
)
func main() {
conf, _ := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
// Initialize service object for QingStor.
qsService, _ := qs.Init(conf)
// List all buckets.
qsOutput, _ := qsService.ListBuckets(&qs.ListBucketsInput{})
// Print HTTP status code.
fmt.Println(qs.IntValue(qsOutput.StatusCode))
// Print the count of buckets.
fmt.Println(qs.IntValue(qsOutput.Count))
// Print the first bucket name.
fmt.Println(qs.StringValue(qsOutput.Buckets[0].Name))
}
```
## Reference Documentations
- [QingStor Documentation](https://docs.qingcloud.com/qingstor/index.html)
- [QingStor Guide](https://docs.qingcloud.com/qingstor/guide/index.html)
- [QingStor APIs](https://docs.qingcloud.com/qingstor/api/index.html)
## Contributing
1. Fork it ( https://github.com/yunify/qingstor-sdk-go/fork )
2. Create your feature branch (`git checkout -b new-feature`)
3. Commit your changes (`git commit -asm 'Add some feature'`)
4. Push to the branch (`git push origin new-feature`)
5. Create a new Pull Request
## LICENSE
The Apache License (Version 2.0, January 2004).

View File

@ -0,0 +1,172 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package config
import (
"errors"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/pengsrc/go-shared/yaml"
"github.com/yunify/qingstor-sdk-go/logger"
)
// A Config stores the configuration of this SDK.
type Config struct {
AccessKeyID string `yaml:"access_key_id"`
SecretAccessKey string `yaml:"secret_access_key"`
Host string `yaml:"host"`
Port int `yaml:"port"`
Protocol string `yaml:"protocol"`
ConnectionRetries int `yaml:"connection_retries"`
AdditionalUserAgent string `yaml:"additional_user_agent"`
LogLevel string `yaml:"log_level"`
Connection *http.Client
}
// New creates a Config with the given AccessKeyID and SecretAccessKey.
func New(accessKeyID, secretAccessKey string) (*Config, error) {
config, err := NewDefault()
if err != nil {
return nil, err
}
config.AccessKeyID = accessKeyID
config.SecretAccessKey = secretAccessKey
config.Connection = &http.Client{}
return config, nil
}
// NewDefault creates a Config with the default configuration.
func NewDefault() (*Config, error) {
config := &Config{}
err := config.LoadDefaultConfig()
if err != nil {
return nil, err
}
config.Connection = &http.Client{}
return config, nil
}
// Check checks the configuration.
func (c *Config) Check() error {
if c.AccessKeyID == "" {
return errors.New("access key ID not specified")
}
if c.SecretAccessKey == "" {
return errors.New("secret access key not specified")
}
if c.Host == "" {
return errors.New("server host not specified")
}
if c.Port <= 0 {
return errors.New("server port not specified")
}
if c.Protocol == "" {
return errors.New("server protocol not specified")
}
if c.AdditionalUserAgent != "" {
for _, x := range c.AdditionalUserAgent {
// Allow printable ASCII from !(33) to ~(126), excluding space(32) and "(34).
if int(x) < 32 || int(x) > 126 || int(x) == 32 || int(x) == 34 {
return errors.New("additional User-Agent contains characters that not allowed")
}
}
}
err := logger.CheckLevel(c.LogLevel)
if err != nil {
return err
}
return nil
}
// LoadDefaultConfig loads the default configuration for Config.
// It returns error if yaml decode failed.
func (c *Config) LoadDefaultConfig() error {
_, err := yaml.Decode([]byte(DefaultConfigFileContent), c)
if err != nil {
logger.Error("Config parse error: " + err.Error())
return err
}
logger.SetLevel(c.LogLevel)
return nil
}
// LoadUserConfig loads user configuration in ~/.qingstor/config.yaml for Config.
// It returns error if file not found.
func (c *Config) LoadUserConfig() error {
_, err := os.Stat(GetUserConfigFilePath())
if err != nil {
logger.Warn("Installing default config file to \"" + GetUserConfigFilePath() + "\"")
InstallDefaultUserConfig()
}
return c.LoadConfigFromFilePath(GetUserConfigFilePath())
}
// LoadConfigFromFilePath loads configuration from a specified local path.
// It returns error if file not found or yaml decode failed.
func (c *Config) LoadConfigFromFilePath(filepath string) error {
if strings.Index(filepath, "~/") == 0 {
filepath = strings.Replace(filepath, "~/", getHome()+"/", 1)
}
yamlString, err := ioutil.ReadFile(filepath)
if err != nil {
logger.Error("File not found: " + filepath)
return err
}
return c.LoadConfigFromContent(yamlString)
}
// LoadConfigFromContent loads configuration from a given byte slice.
// It returns error if yaml decode failed.
func (c *Config) LoadConfigFromContent(content []byte) error {
c.LoadDefaultConfig()
_, err := yaml.Decode(content, c)
if err != nil {
logger.Error("Config parse error: " + err.Error())
return err
}
err = c.Check()
if err != nil {
return err
}
logger.SetLevel(c.LogLevel)
return nil
}

View File

@ -0,0 +1,108 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package config
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/yunify/qingstor-sdk-go/logger"
)
func TestConfig(t *testing.T) {
c := Config{
AccessKeyID: "AccessKeyID",
SecretAccessKey: "SecretAccessKey",
Host: "qingstor.dev",
Port: 443,
Protocol: "https",
ConnectionRetries: 10,
LogLevel: "warn",
}
assert.Equal(t, "AccessKeyID", c.AccessKeyID)
assert.Equal(t, "SecretAccessKey", c.SecretAccessKey)
assert.Equal(t, "qingstor.dev", c.Host)
assert.Equal(t, 10, c.ConnectionRetries)
assert.Equal(t, "warn", c.LogLevel)
c.AdditionalUserAgent = `"`
assert.Error(t, c.Check())
c.AdditionalUserAgent = `test/user`
assert.NoError(t, c.Check())
}
func TestLoadDefaultConfig(t *testing.T) {
config := Config{}
config.LoadDefaultConfig()
assert.Equal(t, "", config.AccessKeyID)
assert.Equal(t, "", config.SecretAccessKey)
assert.Equal(t, "https", config.Protocol)
assert.Equal(t, "qingstor.com", config.Host)
assert.Equal(t, "", config.AdditionalUserAgent)
assert.Equal(t, "warning", logger.GetLevel())
}
func TestLoadUserConfig(t *testing.T) {
config := Config{}
config.LoadUserConfig()
assert.NotNil(t, config.Host)
assert.NotNil(t, config.Protocol)
}
func TestLoadConfigFromContent(t *testing.T) {
fileContent := `
access_key_id: 'access_key_id'
secret_access_key: 'secret_access_key'
log_level: 'debug'
`
config := Config{}
config.LoadConfigFromContent([]byte(fileContent))
assert.Equal(t, "access_key_id", config.AccessKeyID)
assert.Equal(t, "secret_access_key", config.SecretAccessKey)
assert.Equal(t, "https", config.Protocol)
assert.Equal(t, "qingstor.com", config.Host)
assert.Equal(t, "debug", logger.GetLevel())
}
func TestNewDefault(t *testing.T) {
config, err := NewDefault()
assert.Nil(t, err)
assert.Equal(t, "", config.AccessKeyID)
assert.Equal(t, "", config.SecretAccessKey)
assert.Equal(t, "https", config.Protocol)
assert.Equal(t, "qingstor.com", config.Host)
assert.Equal(t, 3, config.ConnectionRetries)
}
func TestNew(t *testing.T) {
config, err := New("AccessKeyID", "SecretAccessKey")
assert.Nil(t, err)
assert.Equal(t, "AccessKeyID", config.AccessKeyID)
assert.Equal(t, "SecretAccessKey", config.SecretAccessKey)
assert.Equal(t, "https", config.Protocol)
assert.Equal(t, "qingstor.com", config.Host)
}

View File

@ -0,0 +1,74 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package config
import (
"io/ioutil"
"os"
"path"
"runtime"
"strings"
)
// DefaultConfigFileContent is the content of default config file.
const DefaultConfigFileContent = `# QingStor services configuration
#access_key_id: ACCESS_KEY_ID
#secret_access_key: SECRET_ACCESS_KEY
host: qingstor.com
port: 443
protocol: https
connection_retries: 3
# Additional User-Agent
additional_user_agent: ""
# Valid log levels are "debug", "info", "warn", "error", and "fatal".
log_level: warn
`
// DefaultConfigFile is the filename of default config file.
const DefaultConfigFile = "~/.qingstor/config.yaml"
// GetUserConfigFilePath returns the user config file path.
func GetUserConfigFilePath() string {
return strings.Replace(DefaultConfigFile, "~/", getHome()+"/", 1)
}
// InstallDefaultUserConfig will install default config file.
func InstallDefaultUserConfig() error {
err := os.MkdirAll(path.Dir(GetUserConfigFilePath()), 0755)
if err != nil {
return err
}
return ioutil.WriteFile(GetUserConfigFilePath(), []byte(DefaultConfigFileContent), 0644)
}
func getHome() string {
home := os.Getenv("HOME")
if runtime.GOOS == "windows" {
home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
}
return home
}

View File

@ -0,0 +1,69 @@
# Configuration Guide
## Summary
This SDK uses a structure called "Config" to store and manage configuration; read the comments on the public functions in ["config/config.go"](https://github.com/yunify/qingstor-sdk-go/blob/master/config/config.go) for details.
Besides the Access Key, you can also configure the API endpoint for private cloud usage scenarios. All available configuration items are listed in the default configuration file.
___Default Configuration File:___
``` yaml
# QingStor services configuration
access_key_id: 'ACCESS_KEY_ID'
secret_access_key: 'SECRET_ACCESS_KEY'
host: 'qingstor.com'
port: 443
protocol: 'https'
connection_retries: 3
# Valid log levels are "debug", "info", "warn", "error", and "fatal".
log_level: 'warn'
```
## Usage
Just create a config structure instance with your API Access Key, and initialize the services you need with the Init() function of the target service.
### Code Snippet
Create default configuration
``` go
defaultConfig, _ := config.NewDefault()
```
Create configuration from Access Key
``` go
configuration, _ := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
anotherConfiguration, _ := config.NewDefault()
anotherConfiguration.AccessKeyID = "ACCESS_KEY_ID"
anotherConfiguration.SecretAccessKey = "SECRET_ACCESS_KEY"
```
Load user configuration
``` go
userConfig, _ := config.NewDefault()
userConfig.LoadUserConfig()
```
Load configuration from config file
``` go
configFromFile, _ := config.NewDefault()
configFromFile.LoadConfigFromFilePath("PATH/TO/FILE")
```
Change API endpoint
``` go
moreConfiguration, _ := config.NewDefault()
moreConfiguration.Protocol = "http"
moreConfiguration.Host = "api.private.com"
moreConfiguration.Port = 80
```

View File

@ -0,0 +1,39 @@
# Installation Guide
## Requirement
This SDK requires the vendor feature of Go 1.6 and higher; the dependencies this project uses are included in the `vendor` directory. We use [glide](https://glide.sh) to manage project dependencies.
___Notice:___ _You can also use Go 1.5 with `GO15VENDOREXPERIMENT=1` set._
## Install from source code
Use `go get` to download this SDK from GitHub:
``` bash
$ go get -u github.com/yunify/qingstor-sdk-go
```
You can also download a specified version of zipped source code in the repository [releases page](https://github.com/yunify/qingstor-sdk-go/releases). The zipped source code only contains golang source code without unit test files.
___Examples:___
- *[qingstor-sdk-go-source-v0.7.1.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-source-v0.7.1.zip)*
- *[qingstor-sdk-go-source-with-vendor-v0.7.1.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-source-with-vendor-v0.7.1.zip)*
## Install from binary release (deprecated)
Since Go 1.7, there's a feature called Binary-Only Package that allows distributing packages in binary form without including the source code used to compile them. For more information about Binary-Only Packages, please read [_GoLang Package Build_](https://golang.org/pkg/go/build/) to learn how to use them.
We provide Linux, macOS and Windows binary packages along with header files. A header file contains only three lines: "//go:binary-only-package" is the first line, the second line is blank, and the third is the package name. There's one header file named "binary.go" for each Go package.
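For illustration, a header file for the `config` package would look like the following (a minimal sketch based on the description above; the actual headers ship in the release archives):
``` go
//go:binary-only-package

package config
```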
You can download a specified version of zipped binary release in the repository [releases page](https://github.com/yunify/qingstor-sdk-go/releases).
___Notice:___ _We don't provide 386 binary packages, since almost no one uses a 386 machine._
___Examples:___
- *[qingstor-sdk-go-header-v0.7.1-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-header-v0.7.1-go-1.7.zip)*
- *[qingstor-sdk-go-binary-v0.7.1-linux_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-linux_amd64-go-1.7.zip)*
- *[qingstor-sdk-go-binary-v0.7.1-darwin_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-darwin_amd64-go-1.7.zip)*
- *[qingstor-sdk-go-binary-v0.7.1-windows_amd64-go-1.7.zip](https://github.com/yunify/qingstor-sdk-go/releases/download/v0.7.1/qingstor-sdk-go-binary-v0.7.1-windows_amd64-go-1.7.zip)*

View File

@ -0,0 +1,220 @@
# QingStor Service Usage Guide
Import the QingStor service package and initialize the service with a config, and you are ready to use the initialized service. The service itself only contains one API, which is "ListBuckets".
To use bucket related APIs, you need to initialize a bucket from service using "Bucket" function.
Each API function takes an Input struct and returns an Output struct. The Input struct consists of request params, request headers, request elements and the request body, and the Output holds the HTTP status code, QingStor request ID, response headers, response elements, response body and an error (if an error occurred).
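As a hedged sketch of this call pattern with the error actually checked (the `qsService` object is initialized as shown in the code snippets below, which discard the error for brevity):
``` go
qsOutput, err := qsService.ListBuckets(&qs.ListBucketsInput{})
if err != nil {
	// The error describes what went wrong,
	// e.g. "QingStor Error: StatusCode 400, Code...".
	fmt.Println(err)
	return
}
// On success the Output struct carries the response data.
fmt.Println(qs.IntValue(qsOutput.StatusCode))
```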
You can use a specific version of a service by importing a service package with a date suffix.
``` go
import (
// Import the latest version API
qs "github.com/yunify/qingstor-sdk-go/service"
)
```
### Code Snippet
Initialize the QingStor service with a configuration
``` go
qsService, _ := qs.Init(configuration)
```
List buckets
``` go
qsOutput, _ := qsService.ListBuckets(nil)
// Print the HTTP status code.
// Example: 200
fmt.Println(qs.IntValue(qsOutput.StatusCode))
// Print the bucket count.
// Example: 5
fmt.Println(qs.IntValue(qsOutput.Count))
// Print the name of first bucket.
// Example: "test-bucket"
fmt.Println(qs.StringValue(qsOutput.Buckets[0].Name))
```
Initialize a QingStor bucket
``` go
bucket, _ := qsService.Bucket("test-bucket", "pek3a")
```
List objects in the bucket
``` go
bOutput, _ := bucket.ListObjects(nil)
// Print the HTTP status code.
// Example: 200
fmt.Println(qs.IntValue(bOutput.StatusCode))
// Print the key count.
// Example: 7
fmt.Println(len(bOutput.Keys))
```
Set ACL of the bucket
``` go
bACLOutput, _ := bucket.PutACL(&qs.PutBucketACLInput{
ACL: []*service.ACLType{{
Grantee: &service.GranteeType{
Type: qs.String("user"),
ID: qs.String("usr-xxxxxxxx"),
},
Permission: qs.String("FULL_CONTROL"),
}},
})
// Print the HTTP status code.
// Example: 200
fmt.Println(qs.IntValue(bACLOutput.StatusCode))
```
Put object
``` go
// Open file
file, _ := os.Open("/tmp/Screenshot.jpg")
defer file.Close()
// Calculate MD5
hash := md5.New()
io.Copy(hash, file)
hashInBytes := hash.Sum(nil)[:16]
md5String := hex.EncodeToString(hashInBytes)
// Put object
oOutput, _ := bucket.PutObject(
"Screenshot.jpg",
&service.PutObjectInput{
ContentLength: qs.Int(102475), // Obtain automatically if empty
ContentType: qs.String("image/jpeg"), // Detect automatically if empty
ContentMD5: qs.String(md5String),
Body: file,
},
)
// Print the HTTP status code.
// Example: 201
fmt.Println(qs.IntValue(oOutput.StatusCode))
```
Delete object
``` go
oOutput, _ := bucket.DeleteObject("Screenshot.jpg")
// Print the HTTP status code.
// Example: 204
fmt.Println(qs.IntValue(oOutput.StatusCode))
```
Initialize Multipart Upload
``` go
aOutput, _ := bucket.InitiateMultipartUpload(
"QingCloudInsight.mov",
&service.InitiateMultipartUploadInput{
ContentType: qs.String("video/quicktime"),
},
)
// Print the HTTP status code.
// Example: 200
fmt.Println(qs.IntValue(aOutput.StatusCode))
// Print the upload ID.
// Example: "9d37dd6ccee643075ca4e597ad65655c"
fmt.Println(qs.StringValue(aOutput.UploadID))
```
Upload Multipart
``` go
aOutput, _ := bucket.UploadMultipart(
"QingCloudInsight.mov",
&service.UploadMultipartInput{
UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
PartNumber: qs.Int(0),
ContentMD5: qs.String(md5String0),
Body: file0,
},
)
// Print the HTTP status code.
// Example: 201
fmt.Println(qs.IntValue(aOutput.StatusCode))
aOutput, _ = bucket.UploadMultipart(
"QingCloudInsight.mov",
&service.UploadMultipartInput{
UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
PartNumber: qs.Int(1),
ContentMD5: qs.String(md5String1),
Body: file1,
},
)
// Print the HTTP status code.
// Example: 201
fmt.Println(qs.IntValue(aOutput.StatusCode))
aOutput, _ = bucket.UploadMultipart(
"QingCloudInsight.mov"
&service.UploadMultipartInput{
UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
PartNumber: qs.Int(2),
ContentMD5: qs.String(md5String2),
Body: file2,
},
)
// Print the HTTP status code.
// Example: 201
fmt.Println(qs.IntValue(aOutput.StatusCode))
```
Complete Multipart Upload
``` go
aOutput, _ := bucket.CompleteMultipartUpload(
"QingCloudInsight.mov",
&service.CompleteMultipartUploadInput{
UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
ObjectParts: []*service.ObjectPart{{
PartNumber: qs.Int(0),
}, {
PartNumber: qs.Int(1),
}, {
PartNumber: qs.Int(2),
}},
},
)
// Print the HTTP status code.
// Example: 200
fmt.Println(qs.IntValue(aOutput.StatusCode))
```
Abort Multipart Upload
``` go
aOutput, err := bucket.AbortMultipartUpload(
"QingCloudInsight.mov"
&service.AbortMultipartUploadInput{
UploadID: qs.String("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
},
)
// Print the error message.
// Example: QingStor Error: StatusCode 400, Code...
fmt.Println(err)
```

33
vendor/github.com/yunify/qingstor-sdk-go/glide.lock generated vendored Normal file
View File

@ -0,0 +1,33 @@
hash: 2e6b1ed4a2ee0638abc2e819ac3c247eaf80fa0f2053cfc41eecf883054c6032
updated: 2017-05-22T14:58:58.927797848+08:00
imports:
- name: github.com/pengsrc/go-shared
version: 454950d6a0782c34427d4f29b46c6bf447256f20
subpackages:
- check
- convert
- json
- yaml
- name: github.com/Sirupsen/logrus
version: d26492970760ca5d33129d2d799e34be5c4782eb
- name: golang.org/x/sys
version: f3918c30c5c2cb527c0b071a27c35120a6c0719a
repo: https://github.com/golang/sys.git
subpackages:
- unix
- name: gopkg.in/yaml.v2
version: a5b47d31c556af34a302ce5d659e6fea44d90de0
repo: https://github.com/go-yaml/yaml.git
testImports:
- name: github.com/davecgh/go-spew
version: 346938d642f2ec3594ed81d874461961cd0faa76
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: 792786c7400a136282c1664665ae0a8db921c6c2
subpackages:
- difflib
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
subpackages:
- assert

4
vendor/github.com/yunify/qingstor-sdk-go/glide.yaml generated vendored Normal file
View File

@ -0,0 +1,4 @@
package: github.com/yunify/qingstor-sdk-go
import:
- package: github.com/pengsrc/go-shared
version: v0.0.8

View File

@ -0,0 +1,119 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
// Package logger provides support for logging to stdout and stderr.
// Log entries are written in the format: [$timestamp #$pid] $severity -- : $message.
package logger
import (
"fmt"
"io"
"os"
"strings"
"time"
"github.com/Sirupsen/logrus"
)
var instance *logrus.Logger
// LogFormatter is used to format log entries.
type LogFormatter struct{}
// Format formats a given log entry, returns byte slice and error.
func (c *LogFormatter) Format(entry *logrus.Entry) ([]byte, error) {
level := strings.ToUpper(entry.Level.String())
if level == "WARNING" {
level = "WARN"
}
if len(level) < 5 {
level = strings.Repeat(" ", 5-len(level)) + level
}
return []byte(fmt.Sprintf(
"[%s #%d] %s -- : %s\n",
time.Now().Format("2006-01-02T15:04:05.000Z"),
os.Getpid(),
level,
entry.Message)), nil
}
// SetOutput sets the destination for the log output.
func SetOutput(out io.Writer) {
instance.Out = out
}
// CheckLevel checks whether the log level is valid.
func CheckLevel(level string) error {
if _, err := logrus.ParseLevel(level); err != nil {
return fmt.Errorf(`log level not valid: "%s"`, level)
}
return nil
}
// GetLevel gets the log level string.
func GetLevel() string {
return instance.Level.String()
}
// SetLevel sets the log level. Valid levels are "debug", "info", "warn", "error", and "fatal".
func SetLevel(level string) {
lvl, err := logrus.ParseLevel(level)
if err != nil {
Fatal(fmt.Sprintf(`log level not valid: "%s"`, level))
}
instance.Level = lvl
}
// Debug logs a message with severity DEBUG.
func Debug(format string, v ...interface{}) {
output(instance.Debug, format, v...)
}
// Info logs a message with severity INFO.
func Info(format string, v ...interface{}) {
output(instance.Info, format, v...)
}
// Warn logs a message with severity WARN.
func Warn(format string, v ...interface{}) {
output(instance.Warn, format, v...)
}
// Error logs a message with severity ERROR.
func Error(format string, v ...interface{}) {
output(instance.Error, format, v...)
}
// Fatal logs a message with severity ERROR followed by a call to os.Exit().
func Fatal(format string, v ...interface{}) {
output(instance.Fatal, format, v...)
}
func output(origin func(...interface{}), format string, v ...interface{}) {
if len(v) > 0 {
origin(fmt.Sprintf(format, v...))
} else {
origin(format)
}
}
func init() {
instance = logrus.New()
instance.Formatter = &LogFormatter{}
instance.Out = os.Stderr
instance.Level = logrus.WarnLevel
}
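Purely for illustration, and not part of the vendored file: a minimal sketch of how a caller might drive the logger package defined above.
``` go
package main

import (
	"os"

	"github.com/yunify/qingstor-sdk-go/logger"
)

func main() {
	// CheckLevel validates a level string without applying it;
	// SetLevel itself calls Fatal on an invalid level.
	if err := logger.CheckLevel("debug"); err == nil {
		logger.SetLevel("debug")
	}
	logger.SetOutput(os.Stdout) // default output is os.Stderr
	logger.Warn("retrying request, %d attempts left", 2)
}
```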

View File

@ -0,0 +1,303 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package builder
import (
"errors"
"fmt"
"io"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"unicode"
"github.com/pengsrc/go-shared/convert"
"github.com/pengsrc/go-shared/json"
"github.com/yunify/qingstor-sdk-go/request/data"
"github.com/yunify/qingstor-sdk-go/utils"
)
// BaseBuilder is the base builder for all services.
type BaseBuilder struct {
parsedURL string
parsedProperties *map[string]string
parsedParams *map[string]string
parsedHeaders *map[string]string
parsedBodyString string
parsedBody io.Reader
operation *data.Operation
input *reflect.Value
}
// BuildHTTPRequest builds http request with an operation and an input.
func (b *BaseBuilder) BuildHTTPRequest(o *data.Operation, i *reflect.Value) (*http.Request, error) {
b.operation = o
b.input = i
_, err := b.parse()
if err != nil {
return nil, err
}
return b.build()
}
func (b *BaseBuilder) build() (*http.Request, error) {
httpRequest, err := http.NewRequest(b.operation.RequestMethod, b.parsedURL, b.parsedBody)
if err != nil {
return nil, err
}
err = b.setupHeaders(httpRequest)
if err != nil {
return nil, err
}
return httpRequest, nil
}
func (b *BaseBuilder) parse() (*BaseBuilder, error) {
err := b.parseRequestParamsAndHeaders()
if err != nil {
return b, err
}
err = b.parseRequestBody()
if err != nil {
return b, err
}
err = b.parseRequestProperties()
if err != nil {
return b, err
}
err = b.parseRequestURL()
if err != nil {
return b, err
}
return b, nil
}
func (b *BaseBuilder) parseRequestParamsAndHeaders() error {
requestParams := map[string]string{}
requestHeaders := map[string]string{}
maps := map[string](map[string]string){
"params": requestParams,
"headers": requestHeaders,
}
b.parsedParams = &requestParams
b.parsedHeaders = &requestHeaders
if !b.input.IsValid() {
return nil
}
fields := b.input.Elem()
if !fields.IsValid() {
return nil
}
for i := 0; i < fields.NumField(); i++ {
tagName := fields.Type().Field(i).Tag.Get("name")
tagLocation := fields.Type().Field(i).Tag.Get("location")
if tagDefault := fields.Type().Field(i).Tag.Get("default"); tagDefault != "" {
maps[tagLocation][tagName] = tagDefault
}
if tagName != "" && tagLocation != "" && maps[tagLocation] != nil {
switch value := fields.Field(i).Interface().(type) {
case *string:
if value != nil {
maps[tagLocation][tagName] = *value
}
case *int:
if value != nil {
maps[tagLocation][tagName] = strconv.Itoa(int(*value))
}
case *int64:
if value != nil {
maps[tagLocation][tagName] = strconv.FormatInt(int64(*value), 10)
}
case *bool:
case *time.Time:
if value != nil {
formatString := fields.Type().Field(i).Tag.Get("format")
format := ""
switch formatString {
case "RFC 822":
format = convert.RFC822
case "ISO 8601":
format = convert.ISO8601
}
maps[tagLocation][tagName] = convert.TimeToString(*value, format)
}
}
}
}
return nil
}
func (b *BaseBuilder) parseRequestBody() error {
requestData := map[string]interface{}{}
if !b.input.IsValid() {
return nil
}
fields := b.input.Elem()
if !fields.IsValid() {
return nil
}
for i := 0; i < fields.NumField(); i++ {
location := fields.Type().Field(i).Tag.Get("location")
if location == "elements" {
name := fields.Type().Field(i).Tag.Get("name")
requestData[name] = fields.Field(i).Interface()
}
}
if len(requestData) != 0 {
dataValue, err := json.Encode(requestData, true)
if err != nil {
return err
}
b.parsedBodyString = string(dataValue)
b.parsedBody = strings.NewReader(b.parsedBodyString)
(*b.parsedHeaders)["Content-Type"] = "application/json"
} else {
value := fields.FieldByName("Body")
if value.IsValid() {
switch value.Interface().(type) {
case string:
if value.String() != "" {
b.parsedBodyString = value.String()
b.parsedBody = strings.NewReader(value.String())
}
case io.Reader:
if value.Interface().(io.Reader) != nil {
b.parsedBody = value.Interface().(io.Reader)
}
}
}
}
return nil
}
func (b *BaseBuilder) parseRequestProperties() error {
propertiesMap := map[string]string{}
b.parsedProperties = &propertiesMap
if b.operation.Properties != nil {
fields := reflect.ValueOf(b.operation.Properties).Elem()
if fields.IsValid() {
for i := 0; i < fields.NumField(); i++ {
switch value := fields.Field(i).Interface().(type) {
case *string:
if value != nil {
propertiesMap[fields.Type().Field(i).Tag.Get("name")] = *value
}
case *int:
if value != nil {
numberString := strconv.Itoa(int(*value))
propertiesMap[fields.Type().Field(i).Tag.Get("name")] = numberString
}
}
}
}
}
return nil
}
func (b *BaseBuilder) parseRequestURL() error {
return nil
}
func (b *BaseBuilder) setupHeaders(httpRequest *http.Request) error {
if b.parsedHeaders != nil {
for headerKey, headerValue := range *b.parsedHeaders {
if headerKey == "X-QS-Fetch-Source" {
// header X-QS-Fetch-Source is a URL to fetch.
// We should first parse this URL.
requestURL, err := url.Parse(headerValue)
if err != nil {
return fmt.Errorf("invalid HTTP header value: %s", headerValue)
}
headerValue = requestURL.String()
} else {
for _, r := range headerValue {
if r > unicode.MaxASCII {
headerValue = utils.URLQueryEscape(headerValue)
break
}
}
}
httpRequest.Header.Set(headerKey, headerValue)
}
}
if httpRequest.Header.Get("Content-Length") == "" {
var length int64
switch body := b.parsedBody.(type) {
case nil:
length = 0
case io.Seeker:
//start, err := body.Seek(0, io.SeekStart)
start, err := body.Seek(0, 0)
if err != nil {
return err
}
//end, err := body.Seek(0, io.SeekEnd)
end, err := body.Seek(0, 2)
if err != nil {
return err
}
//body.Seek(0, io.SeekStart)
body.Seek(0, 0)
length = end - start
default:
return errors.New("Can not get Content-Length")
}
if length > 0 {
httpRequest.ContentLength = length
httpRequest.Header.Set("Content-Length", strconv.Itoa(int(length)))
} else {
httpRequest.Header.Set("Content-Length", "0")
}
}
length, err := strconv.Atoi(httpRequest.Header.Get("Content-Length"))
if err != nil {
return err
}
httpRequest.ContentLength = int64(length)
if httpRequest.Header.Get("Date") == "" {
httpRequest.Header.Set("Date", convert.TimeToString(time.Now(), convert.RFC822))
}
return nil
}

View File

@ -0,0 +1,125 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package builder
import (
"bytes"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/yunify/qingstor-sdk-go/config"
"github.com/yunify/qingstor-sdk-go/request/data"
)
type FakeProperties struct {
A *string `name:"a"`
B *string `name:"b"`
CD *int `name:"c-d"`
}
type FakeInput struct {
ParamA *string `location:"params" name:"a"`
ParamB *string `location:"params" name:"b"`
ParamCD *int `location:"params" name:"c_d" default:"1024"`
HeaderA *string `location:"headers" name:"A"`
HeaderB *time.Time `location:"headers" name:"B" format:"RFC 822"`
HeaderCD *int `location:"headers" name:"C-D"`
ElementA *string `location:"elements" name:"a"`
ElementB *string `location:"elements" name:"b"`
ElementCD *int64 `location:"elements" name:"cd"`
Body *string `location:"body"`
}
func (i *FakeInput) Validate() error {
return nil
}
func String(v string) *string {
return &v
}
func Int(v int) *int {
return &v
}
func Int64(v int64) *int64 {
return &v
}
func Time(v time.Time) *time.Time {
return &v
}
func TestBaseBuilder_BuildHTTPRequest(t *testing.T) {
conf, err := config.NewDefault()
assert.Nil(t, err)
tz, err := time.LoadLocation("Asia/Shanghai")
assert.Nil(t, err)
builder := BaseBuilder{}
operation := &data.Operation{
Config: conf,
APIName: "This is API name",
ServiceName: "Base",
Properties: &FakeProperties{
A: String("property_a"),
B: String("property_b"),
CD: Int(0),
},
RequestMethod: "GET",
RequestURI: "/hello/<a>/<c-d>/<b>/world",
StatusCodes: []int{
200,
201,
},
}
inputValue := reflect.ValueOf(&FakeInput{
ParamA: String("param_a"),
ParamCD: Int(1024),
HeaderA: String("header_a"),
HeaderB: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, tz)),
ElementA: String("element_a"),
ElementB: String("element_b"),
ElementCD: Int64(0),
Body: String("This is body string"),
})
httpRequest, err := builder.BuildHTTPRequest(operation, &inputValue)
assert.Nil(t, err)
assert.Equal(t, &map[string]string{
"a": "property_a",
"b": "property_b",
"c-d": "0",
}, builder.parsedProperties)
assert.Equal(t, &map[string]string{
"a": "param_a",
"c_d": "1024",
}, builder.parsedParams)
assert.Equal(t, &map[string]string{
"A": "header_a",
"B": "Thu, 01 Sep 2016 07:30:00 GMT",
"Content-Type": "application/json",
}, builder.parsedHeaders)
assert.NotNil(t, httpRequest.Header.Get("Date"))
assert.Equal(t, "40", httpRequest.Header.Get("Content-Length"))
buffer := &bytes.Buffer{}
buffer.ReadFrom(httpRequest.Body)
httpRequest.Body.Close()
assert.Equal(t, "{\"a\":\"element_a\",\"b\":\"element_b\",\"cd\":0}", buffer.String())
}

View File

@ -0,0 +1,167 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package builder
import (
"bytes"
"crypto/md5"
"encoding/base64"
"fmt"
"io/ioutil"
"mime"
"net/http"
"net/url"
"path"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/pengsrc/go-shared/convert"
"github.com/yunify/qingstor-sdk-go"
"github.com/yunify/qingstor-sdk-go/logger"
"github.com/yunify/qingstor-sdk-go/request/data"
"github.com/yunify/qingstor-sdk-go/utils"
)
// QingStorBuilder is the request builder for QingStor service.
type QingStorBuilder struct {
baseBuilder *BaseBuilder
}
// BuildHTTPRequest builds http request with an operation and an input.
func (qb *QingStorBuilder) BuildHTTPRequest(o *data.Operation, i *reflect.Value) (*http.Request, error) {
qb.baseBuilder = &BaseBuilder{}
qb.baseBuilder.operation = o
qb.baseBuilder.input = i
_, err := qb.baseBuilder.parse()
if err != nil {
return nil, err
}
err = qb.parseURL()
if err != nil {
return nil, err
}
httpRequest, err := http.NewRequest(qb.baseBuilder.operation.RequestMethod,
qb.baseBuilder.parsedURL, qb.baseBuilder.parsedBody)
if err != nil {
return nil, err
}
err = qb.baseBuilder.setupHeaders(httpRequest)
if err != nil {
return nil, err
}
err = qb.setupHeaders(httpRequest)
if err != nil {
return nil, err
}
logger.Info(fmt.Sprintf(
"Built QingStor request: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
httpRequest.URL.String()),
)
logger.Info(fmt.Sprintf(
"QingStor request headers: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
fmt.Sprint(httpRequest.Header)),
)
if qb.baseBuilder.parsedBodyString != "" {
logger.Info(fmt.Sprintf(
"QingStor request body string: [%d] %s",
convert.StringToUnixTimestamp(httpRequest.Header.Get("Date"), convert.RFC822),
qb.baseBuilder.parsedBodyString),
)
}
return httpRequest, nil
}
func (qb *QingStorBuilder) parseURL() error {
config := qb.baseBuilder.operation.Config
zone := (*qb.baseBuilder.parsedProperties)["zone"]
port := strconv.Itoa(config.Port)
endpoint := config.Protocol + "://" + config.Host + ":" + port
if zone != "" {
endpoint = config.Protocol + "://" + zone + "." + config.Host + ":" + port
}
requestURI := qb.baseBuilder.operation.RequestURI
for key, value := range *qb.baseBuilder.parsedProperties {
endpoint = strings.Replace(endpoint, "<"+key+">", utils.URLQueryEscape(value), -1)
requestURI = strings.Replace(requestURI, "<"+key+">", utils.URLQueryEscape(value), -1)
}
requestURI = regexp.MustCompile(`/+`).ReplaceAllString(requestURI, "/")
requestURL, err := url.Parse(endpoint + requestURI)
if err != nil {
return err
}
if qb.baseBuilder.parsedParams != nil {
queryValue := requestURL.Query()
for key, value := range *qb.baseBuilder.parsedParams {
queryValue.Set(key, value)
}
requestURL.RawQuery = queryValue.Encode()
}
qb.baseBuilder.parsedURL = requestURL.String()
return nil
}
func (qb *QingStorBuilder) setupHeaders(httpRequest *http.Request) error {
method := httpRequest.Method
if method == "POST" || method == "PUT" || method == "DELETE" {
if httpRequest.Header.Get("Content-Type") == "" {
mimeType := mime.TypeByExtension(path.Ext(httpRequest.URL.Path))
if mimeType != "" {
httpRequest.Header.Set("Content-Type", mimeType)
}
}
}
if httpRequest.Header.Get("User-Agent") == "" {
version := fmt.Sprintf(`Go v%s`, strings.Replace(runtime.Version(), "go", "", -1))
system := fmt.Sprintf(`%s_%s_%s`, runtime.GOOS, runtime.GOARCH, runtime.Compiler)
ua := fmt.Sprintf(`qingstor-sdk-go/%s (%s; %s)`, sdk.Version, version, system)
if qb.baseBuilder.operation.Config.AdditionalUserAgent != "" {
ua = fmt.Sprintf(`%s %s`, ua, qb.baseBuilder.operation.Config.AdditionalUserAgent)
}
httpRequest.Header.Set("User-Agent", ua)
}
if qb.baseBuilder.operation.APIName == "Delete Multiple Objects" {
buffer := &bytes.Buffer{}
buffer.ReadFrom(httpRequest.Body)
httpRequest.Body = ioutil.NopCloser(bytes.NewReader(buffer.Bytes()))
md5Value := md5.Sum(buffer.Bytes())
httpRequest.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(md5Value[:]))
}
return nil
}

View File

@ -0,0 +1,83 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package builder
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/yunify/qingstor-sdk-go/config"
"github.com/yunify/qingstor-sdk-go/request/data"
)
type ObjectSubServiceProperties struct {
BucketName *string `json:"bucket-name" name:"bucket-name"`
ObjectKey *string `json:"object-key" name:"object-key"`
Zone *string `json:"zone" name:"zone"`
}
type GetObjectInput struct {
IfMatch *string `json:"If-Match" name:"If-Match" location:"headers"`
IfModifiedSince *time.Time `json:"If-Modified-Since" name:"If-Modified-Since" format:"RFC 822" location:"headers"`
IfNoneMatch *string `json:"If-None-Match" name:"If-None-Match" location:"headers"`
IfUnmodifiedSince time.Time `json:"If-Unmodified-Since" name:"If-Unmodified-Since" format:"RFC 822" location:"headers"`
// Specified range of the Object
Range *string `json:"Range" name:"Range" location:"headers"`
}
func (i *GetObjectInput) Validate() error {
return nil
}
func TestQingStorBuilder_BuildHTTPRequest(t *testing.T) {
conf, err := config.NewDefault()
assert.Nil(t, err)
conf.Host = "qingstor.dev"
tz, err := time.LoadLocation("Asia/Shanghai")
assert.Nil(t, err)
qsBuilder := &QingStorBuilder{}
operation := &data.Operation{
Config: conf,
APIName: "GET Object",
ServiceName: "QingStor",
Properties: &ObjectSubServiceProperties{
BucketName: String("test"),
ObjectKey: String("path/to/key.txt"),
Zone: String("beta"),
},
RequestMethod: "GET",
RequestURI: "/<bucket-name>/<object-key>",
StatusCodes: []int{
201,
},
}
inputValue := reflect.ValueOf(&GetObjectInput{
IfModifiedSince: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, tz)),
Range: String("100-"),
})
httpRequest, err := qsBuilder.BuildHTTPRequest(operation, &inputValue)
assert.Nil(t, err)
assert.NotNil(t, httpRequest.Header.Get("Date"))
assert.Equal(t, "0", httpRequest.Header.Get("Content-Length"))
assert.Equal(t, "", httpRequest.Header.Get("If-Match"))
assert.Equal(t, "Thu, 01 Sep 2016 07:30:00 GMT", httpRequest.Header.Get("If-Modified-Since"))
assert.Equal(t, "100-", httpRequest.Header.Get("Range"))
assert.Equal(t, "https://beta.qingstor.dev:443/test/path/to/key.txt", httpRequest.URL.String())
}

View File

@ -0,0 +1,22 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package data
// Input defines the interfaces that input should implement.
type Input interface {
Validation
}

View File

@ -0,0 +1,35 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package data
import (
"github.com/yunify/qingstor-sdk-go/config"
)
// Operation stores information of an operation.
type Operation struct {
Config *config.Config
Properties interface{}
APIName string
ServiceName string
RequestMethod string
RequestURI string
StatusCodes []int
}

View File

@ -0,0 +1,22 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package data
// Validation defines the validate interface.
type Validation interface {
Validate() error
}

View File

@ -0,0 +1,53 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package errors
import (
"fmt"
"strings"
)
// ParameterRequiredError indicates that the required parameter is missing.
type ParameterRequiredError struct {
ParameterName string
ParentName string
}
// Error returns the description of ParameterRequiredError.
func (e ParameterRequiredError) Error() string {
return fmt.Sprintf(`"%s" is required in "%s"`, e.ParameterName, e.ParentName)
}
// ParameterValueNotAllowedError indicates that the parameter value is not allowed.
type ParameterValueNotAllowedError struct {
ParameterName string
ParameterValue string
AllowedValues []string
}
// Error returns the description of ParameterValueNotAllowedError.
func (e ParameterValueNotAllowedError) Error() string {
allowedValues := []string{}
for _, value := range e.AllowedValues {
allowedValues = append(allowedValues, "\""+value+"\"")
}
return fmt.Sprintf(
`"%s" value "%s" is not allowed, should be one of %s`,
e.ParameterName,
e.ParameterValue,
strings.Join(allowedValues, ", "))
}
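For illustration only: these error types are what an input's Validate method would typically return. A hypothetical sketch follows; PutObjectInput and its field are made up for this example, and the request/errors import path is the one from this vendor drop.
``` go
package service // hypothetical location, shown only to illustrate usage

import (
	"github.com/yunify/qingstor-sdk-go/request/errors"
)

// PutObjectInput is a made-up input struct for this sketch.
type PutObjectInput struct {
	ContentLength *int64 `location:"headers" name:"Content-Length"`
}

// Validate reports a missing required parameter via ParameterRequiredError.
func (i *PutObjectInput) Validate() error {
	if i.ContentLength == nil {
		return errors.ParameterRequiredError{
			ParameterName: "Content-Length",
			ParentName:    "PutObjectInput",
		}
	}
	return nil
}
```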

View File

@ -0,0 +1,36 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package errors
import "fmt"
// QingStorError stores information about a QingStor error response.
type QingStorError struct {
StatusCode int
Code string `json:"code"`
Message string `json:"message"`
RequestID string `json:"request_id"`
ReferenceURL string `json:"url"`
}
// Error returns the description of the QingStor error response.
func (qse QingStorError) Error() string {
return fmt.Sprintf(
"QingStor Error: StatusCode \"%d\", Code \"%s\", Message \"%s\", Request ID \"%s\", Reference URL \"%s\"",
qse.StatusCode, qse.Code, qse.Message, qse.RequestID, qse.ReferenceURL)
}

View File

@ -0,0 +1,232 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package request
import (
"errors"
"fmt"
"net/http"
"reflect"
"time"
"github.com/pengsrc/go-shared/convert"
"github.com/yunify/qingstor-sdk-go/logger"
"github.com/yunify/qingstor-sdk-go/request/builder"
"github.com/yunify/qingstor-sdk-go/request/data"
"github.com/yunify/qingstor-sdk-go/request/signer"
"github.com/yunify/qingstor-sdk-go/request/unpacker"
)
// A Request can build, sign, send and unpack API request.
type Request struct {
Operation *data.Operation
Input *reflect.Value
Output *reflect.Value
HTTPRequest *http.Request
HTTPResponse *http.Response
}
// New creates a Request from the given Operation, Input and Output.
// It returns a Request.
func New(o *data.Operation, i data.Input, x interface{}) (*Request, error) {
input := reflect.ValueOf(i)
if input.IsValid() && input.Elem().IsValid() {
err := i.Validate()
if err != nil {
return nil, err
}
}
output := reflect.ValueOf(x)
return &Request{
Operation: o,
Input: &input,
Output: &output,
}, nil
}
// Send sends the API request.
// It returns an error if one occurred.
func (r *Request) Send() error {
err := r.check()
if err != nil {
return err
}
err = r.build()
if err != nil {
return err
}
err = r.sign()
if err != nil {
return err
}
err = r.send()
if err != nil {
return err
}
err = r.unpack()
if err != nil {
return err
}
return nil
}
// Sign signs the API request by setting the authorization header.
// It returns an error if one occurred.
func (r *Request) Sign() error {
err := r.check()
if err != nil {
return err
}
err = r.build()
if err != nil {
return err
}
err = r.sign()
if err != nil {
return err
}
return nil
}
// SignQuery signs the API request by appending a query string.
// It returns an error if one occurred.
func (r *Request) SignQuery(timeoutSeconds int) error {
err := r.check()
if err != nil {
return err
}
err = r.build()
if err != nil {
return err
}
err = r.signQuery(int(time.Now().Unix()) + timeoutSeconds)
if err != nil {
return err
}
return nil
}
func (r *Request) check() error {
if r.Operation.Config.AccessKeyID == "" {
return errors.New("access key not provided")
}
if r.Operation.Config.SecretAccessKey == "" {
return errors.New("secret access key not provided")
}
return nil
}
func (r *Request) build() error {
b := &builder.QingStorBuilder{}
httpRequest, err := b.BuildHTTPRequest(r.Operation, r.Input)
if err != nil {
return err
}
r.HTTPRequest = httpRequest
return nil
}
func (r *Request) sign() error {
s := &signer.QingStorSigner{
AccessKeyID: r.Operation.Config.AccessKeyID,
SecretAccessKey: r.Operation.Config.SecretAccessKey,
}
err := s.WriteSignature(r.HTTPRequest)
if err != nil {
return err
}
return nil
}
func (r *Request) signQuery(expires int) error {
s := &signer.QingStorSigner{
AccessKeyID: r.Operation.Config.AccessKeyID,
SecretAccessKey: r.Operation.Config.SecretAccessKey,
}
err := s.WriteQuerySignature(r.HTTPRequest, expires)
if err != nil {
return err
}
return nil
}
func (r *Request) send() error {
var response *http.Response
var err error
if r.Operation.Config.Connection == nil {
return errors.New("connection not initialized")
}
retries := r.Operation.Config.ConnectionRetries + 1
for {
if retries > 0 {
logger.Info(fmt.Sprintf(
"Sending request: [%d] %s %s",
convert.StringToUnixTimestamp(r.HTTPRequest.Header.Get("Date"), convert.RFC822),
r.Operation.RequestMethod,
r.HTTPRequest.Host,
))
response, err = r.Operation.Config.Connection.Do(r.HTTPRequest)
if err == nil {
retries = 0
} else {
retries--
time.Sleep(time.Second)
}
} else {
break
}
}
if err != nil {
return err
}
r.HTTPResponse = response
return nil
}
func (r *Request) unpack() error {
u := &unpacker.QingStorUnpacker{}
err := u.UnpackHTTPRequest(r.Operation, r.HTTPResponse, r.Output)
if err != nil {
return err
}
return nil
}
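To tie the exported pieces together, here is a minimal sketch of the request lifecycle a generated service method would drive; the function and parameter names are placeholders, not SDK API.
``` go
package example // illustrative only

import (
	"github.com/yunify/qingstor-sdk-go/request"
	"github.com/yunify/qingstor-sdk-go/request/data"
)

// call builds, signs, sends and unpacks one API request.
// input must implement data.Input; output is filled in by the unpacker.
func call(operation *data.Operation, input data.Input, output interface{}) error {
	req, err := request.New(operation, input, output)
	if err != nil {
		return err
	}
	// Send runs check -> build -> sign -> send -> unpack, as defined above.
	// For a pre-signed URL instead, req.SignQuery(3600) could be used and
	// req.HTTPRequest.URL read back without sending.
	return req.Send()
}
```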

View File

@ -0,0 +1,136 @@
// +-------------------------------------------------------------------------
// | Copyright (C) 2016 Yunify, Inc.
// +-------------------------------------------------------------------------
// | Licensed under the Apache License, Version 2.0 (the "License");
// | you may not use this work except in compliance with the License.
// | You may obtain a copy of the License in the LICENSE file, or at:
// |
// | http://www.apache.org/licenses/LICENSE-2.0
// |
// | Unless required by applicable law or agreed to in writing, software
// | distributed under the License is distributed on an "AS IS" BASIS,
// | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// | See the License for the specific language governing permissions and
// | limitations under the License.
// +-------------------------------------------------------------------------
package request
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/yunify/qingstor-sdk-go/config"
"github.com/yunify/qingstor-sdk-go/logger"
"github.com/yunify/qingstor-sdk-go/request/data"
"github.com/yunify/qingstor-sdk-go/request/errors"
)
type SomeActionProperties struct {
A *string `json:"a" name:"a"`
B *string `json:"b" name:"b"`
CD *string `json:"c-d" name:"c-d"`
}
type SomeActionInput struct {
Date *time.Time `json:"Date" name:"Date" format:"RFC 822" location:"headers"`
IfModifiedSince *time.Time `json:"If-Modified-Since" name:"If-Modified-Since" format:"RFC 822" location:"headers"`
Range *string `json:"Range" name:"Range" location:"headers"`
UploadID *string `json:"upload_id" name:"upload_id" location:"params"`
Count *int `json:"count" name:"count" location:"elements"`
}
func (s *SomeActionInput) Validate() error {
return nil
}
type SomeActionOutput struct {
StatusCode *int `location:"statusCode"`
Error *errors.QingStorError
RequestID *string `location:"requestID"`
}
func String(v string) *string {
return &v
}
func Int(v int) *int {
return &v
}
func Time(v time.Time) *time.Time {
return &v
}
func TestRequest_Send(t *testing.T) {
conf, err := config.New("ACCESS_KEY_ID", "SECRET_ACCESS_KEY")
assert.Nil(t, err)
logger.SetLevel("warn")
operation := &data.Operation{
Config: conf,
Properties: &SomeActionProperties{
A: String("aaa"),
B: String("bbb"),
CD: String("ccc-ddd"),
},
APIName: "Some Action",
RequestMethod: "GET",
RequestURI: "/<a>/<b>/<c-d>",
StatusCodes: []int{
200, // OK
206, // Partial content
304, // Not modified
412, // Precondition failed
},
}
output := &SomeActionOutput{}
r, err := New(operation, &SomeActionInput{
Date: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, time.UTC)),
IfModifiedSince: Time(time.Date(2016, 9, 1, 15, 30, 0, 0, time.UTC)),
Range: String("100-"),
UploadID: String("0"),
Count: Int(23),
}, output)
assert.Nil(t, err)
err = r.build()
assert.Nil(t, err)
err = r.sign()
assert.Nil(t, err)
assert.Equal(t, r.HTTPRequest.URL.String(), "https://qingstor.com:443/aaa/bbb/ccc-ddd?upload_id=0")
assert.Equal(t, r.HTTPRequest.Header.Get("Range"), "100-")
assert.Equal(t, r.HTTPRequest.Header.Get("If-Modified-Since"), "Thu, 01 Sep 2016 15:30:00 GMT")
assert.Equal(t, r.HTTPRequest.Header.Get("Content-Length"), "12")
assert.Equal(t, r.HTTPRequest.Header.Get("Authorization"), "QS ACCESS_KEY_ID:pA7G9qo4iQ6YHu7p4fX9Wcg4V9S6Mcgvz7p/0wEdz78=")
httpResponse := &http.Response{Header: http.Header{}}
httpResponse.StatusCode = 400
httpResponse.Header.Set("Content-Type", "application/json")
responseString := `{
"code": "bad_request",
"message": "Invalid argument(s) or invalid argument value(s)",
"request_id": "1e588695254aa08cf7a43f612e6ce14b",
"url": "http://docs.qingcloud.com/object_storage/api/object/get.html"
}`
httpResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte(responseString)))
assert.Nil(t, err)
r.HTTPResponse = httpResponse
err = r.unpack()
assert.NotNil(t, err)
switch e := err.(type) {
case *errors.QingStorError:
assert.Equal(t, "bad_request", e.Code)
assert.Equal(t, "1e588695254aa08cf7a43f612e6ce14b", e.RequestID)
}
}

Some files were not shown because too many files have changed in this diff